2023-08-25 13:48:45,614 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors HTTP/1.1" 200 68 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,619 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_src HTTP/1.1" 200 2356 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,619 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink_del HTTP/1.1" 200 1499 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,619 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_sink HTTP/1.1" 200 1302 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,620 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink HTTP/1.1" 200 1302 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,621 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_sink/status HTTP/1.1" 200 2230 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,622 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,622 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink_del/status HTTP/1.1" 200 2234 "-" "ReactorNetty/1.1.6" 0 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,623 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink/status HTTP/1.1" 200 2230 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,624 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_sink/config HTTP/1.1" 200 1205 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,625 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_src/config HTTP/1.1" 200 2259 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,626 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink_del/config HTTP/1.1" 200 1394 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,627 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink/config HTTP/1.1" 200 1205 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,628 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_sink/tasks HTTP/1.1" 200 1326 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,629 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_src/tasks HTTP/1.1" 200 2379 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,629 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink_del/tasks HTTP/1.1" 200 1519 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,630 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink/tasks HTTP/1.1" 200 1326 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,631 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_sink/tasks/0/status HTTP/1.1" 200 2118 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,632 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,632 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink_del/tasks/0/status HTTP/1.1" 200 2118 "-" "ReactorNetty/1.1.6" 0 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,633 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink/tasks/0/status HTTP/1.1" 200 2118 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,634 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,635 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau33_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,635 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink_del/topics HTTP/1.1" 200 79 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:45,636 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:45 +0000] "GET /connectors/vk_nau32_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:48,661 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:48 +0000] "GET /connectors/vk_nau32_sink HTTP/1.1" 200 1302 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:48,662 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:48 +0000] "GET /connectors/vk_nau32_sink/tasks HTTP/1.1" 200 1326 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:48,663 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:48 +0000] "GET /connectors/vk_nau32_sink/status HTTP/1.1" 200 2230 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:48,664 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:48 +0000] "GET /connectors/vk_nau32_sink/tasks/0/status HTTP/1.1" 200 2118 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:51,295 INFO || [AdminClient clientId=naument--shared-admin] Node 2 disconnected. [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:48:52,635 INFO || [AdminClient clientId=connector-adminclient-vk_nau33_src-0] Node 1 disconnected. [org.apache.kafka.clients.NetworkClient]
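
The burst of GET requests above is a REST client (the "ReactorNetty/1.1.6" user agent, likely a monitoring UI) walking the standard Kafka Connect endpoints for every connector: /connectors, then /connectors/<name>, /status, /config, /tasks, /tasks/0/status and /topics. A minimal sketch of the same polling loop in Python, assuming only that a worker is reachable at CONNECT_URL and that the requests library is installed (the URL below is a placeholder; the log shows the worker on 172.18.0.10:8083):

import requests

CONNECT_URL = "http://localhost:8083"  # assumed worker address, not taken from the log

def dump_connector_state(base_url: str = CONNECT_URL) -> None:
    # GET /connectors returns the list of connector names.
    for name in requests.get(f"{base_url}/connectors", timeout=10).json():
        # The same sub-resources polled in the log above (task 0 exists here
        # because every connector in the log runs a single task).
        for sub in ("", "/status", "/config", "/tasks", "/tasks/0/status", "/topics"):
            resp = requests.get(f"{base_url}/connectors/{name}{sub}", timeout=10)
            print(f"{name}{sub or '/'}: {resp.status_code}, {len(resp.content)} bytes")

if __name__ == "__main__":
    dump_connector_state()
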
2023-08-25 13:48:55,108 INFO || Successfully processed removal of connector 'vk_nau32_sink' [org.apache.kafka.connect.storage.KafkaConfigBackingStore]
2023-08-25 13:48:55,108 INFO || [Worker clientId=connect-1, groupId=naument] Connector vk_nau32_sink config removed [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:48:55,109 INFO || [Worker clientId=connect-1, groupId=naument] Handling connector-only config update by stopping connector vk_nau32_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:48:55,109 INFO || Stopping connector vk_nau32_sink [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:48:55,109 INFO || Scheduled shutdown for WorkerConnector{id=vk_nau32_sink} [org.apache.kafka.connect.runtime.WorkerConnector]
2023-08-25 13:48:55,109 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "DELETE /connectors/vk_nau32_sink HTTP/1.1" 204 0 "-" "ReactorNetty/1.1.6" 7 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,110 INFO || Completed shutdown for WorkerConnector{id=vk_nau32_sink} [org.apache.kafka.connect.runtime.WorkerConnector]
2023-08-25 13:48:55,110 INFO || [Worker clientId=connect-1, groupId=naument] Rebalance started [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:48:55,110 INFO || [Worker clientId=connect-1, groupId=naument] (Re-)joining group [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:48:55,112 INFO || [Worker clientId=connect-1, groupId=naument] Successfully joined group with generation Generation{generationId=2, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:48:55,116 INFO || [Worker clientId=connect-1, groupId=naument] Successfully synced group in generation Generation{generationId=2, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:48:55,117 INFO || Stopping connector vk_nau32_sink [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:48:55,117 WARN || Ignoring stop request for unowned connector vk_nau32_sink [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:48:55,117 WARN || Ignoring await stop request for non-present connector vk_nau32_sink [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:48:55,117 INFO || Stopping task vk_nau32_sink-0 [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:48:55,117 INFO || [Worker clientId=connect-1, groupId=naument] Finished stopping tasks in preparation for rebalance [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:48:55,120 INFO || [Worker clientId=connect-1, groupId=naument] Finished flushing status backing store in preparation for rebalance [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:48:55,120 INFO || [Worker clientId=connect-1, groupId=naument] Joined group at generation 2 with protocol version 2 and got assignment: Assignment{error=0, leader='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', leaderUrl='http://172.18.0.10:8083/', offset=2356, connectorIds=[vk_nau33_src, vk_nau33_sink, vk_nau32_sink_del], taskIds=[vk_nau33_src-0, vk_nau33_sink-0, vk_nau32_sink_del-0], revokedConnectorIds=[vk_nau32_sink], revokedTaskIds=[vk_nau32_sink-0], delay=0} with rebalance delay: 0 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:48:55,121 INFO || [Worker clientId=connect-1, groupId=naument] Starting connectors and tasks using config offset 2356 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:48:55,121 INFO || [Worker clientId=connect-1, groupId=naument] Finished starting connectors and tasks [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:48:55,121 INFO || [Worker clientId=connect-1, groupId=naument] Rebalance started [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:48:55,121 INFO || [Worker clientId=connect-1, groupId=naument] (Re-)joining group [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:48:55,122 INFO || [Worker clientId=connect-1, groupId=naument] Successfully joined group with generation Generation{generationId=3, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:48:55,124 INFO || [Worker clientId=connect-1, groupId=naument] Successfully synced group in generation Generation{generationId=3, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:48:55,125 INFO || [Worker clientId=connect-1, groupId=naument] Joined group at generation 3 with protocol version 2 and got assignment: Assignment{error=0, leader='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', leaderUrl='http://172.18.0.10:8083/', offset=2356, connectorIds=[vk_nau33_src, vk_nau33_sink, vk_nau32_sink_del], taskIds=[vk_nau33_src-0, vk_nau33_sink-0, vk_nau32_sink_del-0], revokedConnectorIds=[], revokedTaskIds=[], delay=0} with rebalance delay: 0 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:48:55,125 INFO || [Worker clientId=connect-1, groupId=naument] Starting connectors and tasks using config offset 2356 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:48:55,125 INFO || [Worker clientId=connect-1, groupId=naument] Finished starting connectors and tasks [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:48:55,134 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors HTTP/1.1" 200 52 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,136 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_src HTTP/1.1" 200 2356 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,136 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau32_sink_del HTTP/1.1" 200 1499 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,136 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_sink HTTP/1.1" 200 1302 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,137 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 0 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,138 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau32_sink_del/status HTTP/1.1" 200 2234 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,139 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_sink/status HTTP/1.1" 200 2230 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,140 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_src/config HTTP/1.1" 200 2259 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,141 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau32_sink_del/config HTTP/1.1" 200 1394 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,141 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_sink/config HTTP/1.1" 200 1205 "-" "ReactorNetty/1.1.6" 0 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,142 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_src/tasks HTTP/1.1" 200 2379 "-" "ReactorNetty/1.1.6" 0 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,144 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau32_sink_del/tasks HTTP/1.1" 200 1519 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,146 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_sink/tasks HTTP/1.1" 200 1326 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,148 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_sink/tasks/0/status HTTP/1.1" 200 2118 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,148 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau32_sink_del/tasks/0/status HTTP/1.1" 200 2118 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,148 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,148 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 0 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,149 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau32_sink_del/topics HTTP/1.1" 200 79 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:55,150 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:55 +0000] "GET /connectors/vk_nau33_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:57,366 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:57 +0000] "GET /connectors/vk_nau32_sink_del/tasks HTTP/1.1" 200 1519 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:57,366 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:57 +0000] "GET /connectors/vk_nau32_sink_del HTTP/1.1" 200 1499 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:57,368 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:57 +0000] "GET /connectors/vk_nau32_sink_del/status HTTP/1.1" 200 2234 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:48:57,369 INFO || 10.0.2.141 - - [25/Aug/2023:10:48:57 +0000] "GET /connectors/vk_nau32_sink_del/tasks/0/status HTTP/1.1" 200 2118 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,103 INFO || Successfully processed removal of connector 'vk_nau32_sink_del' [org.apache.kafka.connect.storage.KafkaConfigBackingStore]
2023-08-25 13:49:01,103 INFO || [Worker clientId=connect-1, groupId=naument] Connector vk_nau32_sink_del config removed [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:01,103 INFO || [Worker clientId=connect-1, groupId=naument] Handling connector-only config update by stopping connector vk_nau32_sink_del [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:01,103 INFO || Stopping connector vk_nau32_sink_del [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:01,103 INFO || Scheduled shutdown for WorkerConnector{id=vk_nau32_sink_del} [org.apache.kafka.connect.runtime.WorkerConnector]
2023-08-25 13:49:01,104 INFO || Completed shutdown for WorkerConnector{id=vk_nau32_sink_del} [org.apache.kafka.connect.runtime.WorkerConnector]
2023-08-25 13:49:01,104 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "DELETE /connectors/vk_nau32_sink_del HTTP/1.1" 204 0 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,104 INFO || [Worker clientId=connect-1, groupId=naument] Rebalance started [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:01,104 INFO || [Worker clientId=connect-1, groupId=naument] (Re-)joining group [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:01,105 INFO || [Worker clientId=connect-1, groupId=naument] Successfully joined group with generation Generation{generationId=4, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:01,107 INFO || [Worker clientId=connect-1, groupId=naument] Successfully synced group in generation Generation{generationId=4, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:01,107 INFO || Stopping connector vk_nau32_sink_del [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:01,107 WARN || Ignoring stop request for unowned connector vk_nau32_sink_del [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:01,107 WARN || Ignoring await stop request for non-present connector vk_nau32_sink_del [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:01,107 INFO || Stopping task vk_nau32_sink_del-0 [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:01,107 INFO || [Worker clientId=connect-1, groupId=naument] Finished stopping tasks in preparation for rebalance [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:01,108 INFO || [Worker clientId=connect-1, groupId=naument] Finished flushing status backing store in preparation for rebalance [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:01,108 INFO || [Worker clientId=connect-1, groupId=naument] Joined group at generation 4 with protocol version 2 and got assignment: Assignment{error=0, leader='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', leaderUrl='http://172.18.0.10:8083/', offset=2358, connectorIds=[vk_nau33_src, vk_nau33_sink], taskIds=[vk_nau33_src-0, vk_nau33_sink-0], revokedConnectorIds=[vk_nau32_sink_del], revokedTaskIds=[vk_nau32_sink_del-0], delay=0} with rebalance delay: 0 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:01,108 INFO || [Worker clientId=connect-1, groupId=naument] Starting connectors and tasks using config offset 2358 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:01,108 INFO || [Worker clientId=connect-1, groupId=naument] Finished starting connectors and tasks [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:01,109 INFO || [Worker clientId=connect-1, groupId=naument] Rebalance started [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:01,109 INFO || [Worker clientId=connect-1, groupId=naument] (Re-)joining group [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:01,109 INFO || [Worker clientId=connect-1, groupId=naument] Successfully joined group with generation Generation{generationId=5, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:01,111 INFO || [Worker clientId=connect-1, groupId=naument] Successfully synced group in generation Generation{generationId=5, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:01,111 INFO || [Worker clientId=connect-1, groupId=naument] Joined group at generation 5 with protocol version 2 and got assignment: Assignment{error=0, leader='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', leaderUrl='http://172.18.0.10:8083/', offset=2358, connectorIds=[vk_nau33_src, vk_nau33_sink], taskIds=[vk_nau33_src-0, vk_nau33_sink-0], revokedConnectorIds=[], revokedTaskIds=[], delay=0} with rebalance delay: 0 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:01,111 INFO || [Worker clientId=connect-1, groupId=naument] Starting connectors and tasks using config offset 2358 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:01,111 INFO || [Worker clientId=connect-1, groupId=naument] Finished starting connectors and tasks [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:01,123 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors HTTP/1.1" 200 32 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,125 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_sink HTTP/1.1" 200 1302 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,126 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_src HTTP/1.1" 200 2356 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,127 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_sink/status HTTP/1.1" 200 2230 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,128 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,129 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_sink/config HTTP/1.1" 200 1205 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,130 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_src/config HTTP/1.1" 200 2259 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,130 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_sink/tasks HTTP/1.1" 200 1326 "-" "ReactorNetty/1.1.6" 0 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,131 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_src/tasks HTTP/1.1" 200 2379 "-" "ReactorNetty/1.1.6" 0 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,132 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_sink/tasks/0/status HTTP/1.1" 200 2118 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,133 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,134 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:01,134 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:01 +0000] "GET /connectors/vk_nau33_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:06,333 INFO || Successfully processed removal of connector 'vk_nau33_sink' [org.apache.kafka.connect.storage.KafkaConfigBackingStore]
2023-08-25 13:49:06,333 INFO || [Worker clientId=connect-1, groupId=naument] Connector vk_nau33_sink config removed [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:06,333 INFO || [Worker clientId=connect-1, groupId=naument] Handling connector-only config update by stopping connector vk_nau33_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:06,333 INFO || Stopping connector vk_nau33_sink [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:06,333 INFO || Scheduled shutdown for WorkerConnector{id=vk_nau33_sink} [org.apache.kafka.connect.runtime.WorkerConnector]
2023-08-25 13:49:06,333 INFO || Completed shutdown for WorkerConnector{id=vk_nau33_sink} [org.apache.kafka.connect.runtime.WorkerConnector]
2023-08-25 13:49:06,334 INFO || [Worker clientId=connect-1, groupId=naument] Rebalance started [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:06,334 INFO || [Worker clientId=connect-1, groupId=naument] (Re-)joining group [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:06,334 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:06 +0000] "DELETE /connectors/vk_nau33_sink HTTP/1.1" 204 0 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:06,335 INFO || [Worker clientId=connect-1, groupId=naument] Successfully joined group with generation Generation{generationId=6, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:06,336 INFO || [Worker clientId=connect-1, groupId=naument] Successfully synced group in generation Generation{generationId=6, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:06,337 INFO || Stopping connector vk_nau33_sink [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:06,337 WARN || Ignoring stop request for unowned connector vk_nau33_sink [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:06,337 INFO || Stopping task vk_nau33_sink-0 [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:06,337 WARN || Ignoring await stop request for non-present connector vk_nau33_sink [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:06,337 INFO || [Worker clientId=connect-1, groupId=naument] Finished stopping tasks in preparation for rebalance [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:06,338 INFO || [Worker clientId=connect-1, groupId=naument] Finished flushing status backing store in preparation for rebalance [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:06,338 INFO || [Worker clientId=connect-1, groupId=naument] Joined group at generation 6 with protocol version 2 and got assignment: Assignment{error=0, leader='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', leaderUrl='http://172.18.0.10:8083/', offset=2360, connectorIds=[vk_nau33_src], taskIds=[vk_nau33_src-0], revokedConnectorIds=[vk_nau33_sink], revokedTaskIds=[vk_nau33_sink-0], delay=0} with rebalance delay: 0 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:06,338 INFO || [Worker clientId=connect-1, groupId=naument] Starting connectors and tasks using config offset 2360 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:06,338 INFO || [Worker clientId=connect-1, groupId=naument] Finished starting connectors and tasks [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:06,338 INFO || [Worker clientId=connect-1, groupId=naument] Rebalance started [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:06,338 INFO || [Worker clientId=connect-1, groupId=naument] (Re-)joining group [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:06,339 INFO || [Worker clientId=connect-1, groupId=naument] Successfully joined group with generation Generation{generationId=7, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:06,341 INFO || [Worker clientId=connect-1, groupId=naument] Successfully synced group in generation Generation{generationId=7, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:06,341 INFO || [Worker clientId=connect-1, groupId=naument] Joined group at generation 7 with protocol version 2 and got assignment: Assignment{error=0, leader='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', leaderUrl='http://172.18.0.10:8083/', offset=2360, connectorIds=[vk_nau33_src], taskIds=[vk_nau33_src-0], revokedConnectorIds=[], revokedTaskIds=[], delay=0} with rebalance delay: 0 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:06,341 INFO || [Worker clientId=connect-1, groupId=naument] Starting connectors and tasks using config offset 2360 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:06,341 INFO || [Worker clientId=connect-1, groupId=naument] Finished starting connectors and tasks [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:06,343 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:06 +0000] "GET /connectors HTTP/1.1" 200 16 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:06,345 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:06 +0000] "GET /connectors/vk_nau33_src HTTP/1.1" 200 2356 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:06,347 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:06 +0000] "GET /connectors/vk_nau33_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:06,349 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:06 +0000] "GET /connectors/vk_nau33_src/config HTTP/1.1" 200 2259 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:06,351 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:06 +0000] "GET /connectors/vk_nau33_src/tasks HTTP/1.1" 200 2379 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:06,353 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:06 +0000] "GET /connectors/vk_nau33_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:06,354 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:06 +0000] "GET /connectors/vk_nau33_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:16,210 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:16 +0000] "GET /connectors HTTP/1.1" 200 16 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:16,212 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:16 +0000] "GET /connectors/vk_nau33_src HTTP/1.1" 200 2356 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:16,214 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:16 +0000] "GET /connectors/vk_nau33_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:16,216 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:16 +0000] "GET /connectors/vk_nau33_src/config HTTP/1.1" 200 2259 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:16,218 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:16 +0000] "GET /connectors/vk_nau33_src/tasks HTTP/1.1" 200 2379 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:16,219 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:16 +0000] "GET /connectors/vk_nau33_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 0 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:16,221 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:16 +0000] "GET /connectors/vk_nau33_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:17,386 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:17 +0000] "GET /connectors/vk_nau33_src HTTP/1.1" 200 2356 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:17,386 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:17 +0000] "GET /connectors/vk_nau33_src/tasks HTTP/1.1" 200 2379 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:17,388 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:17 +0000] "GET /connectors/vk_nau33_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:17,389 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:17 +0000] "GET /connectors/vk_nau33_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:21,531 INFO || Successfully processed removal of connector 'vk_nau33_src' [org.apache.kafka.connect.storage.KafkaConfigBackingStore]
2023-08-25 13:49:21,531 INFO || [Worker clientId=connect-1, groupId=naument] Connector vk_nau33_src config removed [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:21,531 INFO || [Worker clientId=connect-1, groupId=naument] Handling connector-only config update by stopping connector vk_nau33_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:21,531 INFO || Stopping connector vk_nau33_src [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:21,531 INFO || Scheduled shutdown for WorkerConnector{id=vk_nau33_src} [org.apache.kafka.connect.runtime.WorkerConnector]
2023-08-25 13:49:21,531 INFO || Completed shutdown for WorkerConnector{id=vk_nau33_src} [org.apache.kafka.connect.runtime.WorkerConnector]
2023-08-25 13:49:21,531 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:21 +0000] "DELETE /connectors/vk_nau33_src HTTP/1.1" 204 0 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:21,533 INFO || [Worker clientId=connect-1, groupId=naument] Rebalance started [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:21,533 INFO || [Worker clientId=connect-1, groupId=naument] (Re-)joining group [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:21,534 INFO || [Worker clientId=connect-1, groupId=naument] Successfully joined group with generation Generation{generationId=8, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:21,536 INFO || [Worker clientId=connect-1, groupId=naument] Successfully synced group in generation Generation{generationId=8, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:21,536 INFO || Stopping connector vk_nau33_src [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:21,536 WARN || Ignoring stop request for unowned connector vk_nau33_src [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:21,536 INFO || Stopping task vk_nau33_src-0 [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:21,536 WARN || Ignoring await stop request for non-present connector vk_nau33_src [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:49:21,549 INFO || 10.0.2.141 - - [25/Aug/2023:10:49:21 +0000] "GET /connectors HTTP/1.1" 200 2 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:49:21,777 INFO || Stopping down connector [io.debezium.connector.common.BaseSourceTask]
2023-08-25 13:49:23,193 INFO Oracle|vk_nau33|streaming startScn=289931545938, endScn=289931545941 [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource]
2023-08-25 13:49:23,250 INFO Oracle|vk_nau33|streaming Streaming metrics dump: OracleStreamingChangeEventSourceMetrics{currentScn=289931545941, oldestScn=-1, committedScn=289931545938, offsetScn=289931545916, logMinerQueryCount=656, totalProcessedRows=319000, totalCapturedDmlCount=0, totalDurationOfFetchingQuery=PT3M59.320943S, lastCapturedDmlCount=0, lastDurationOfFetchingQuery=PT0.07419S, maxCapturedDmlCount=0, maxDurationOfFetchingQuery=PT8.176507S, totalBatchProcessingDuration=PT8M42.619821S, lastBatchProcessingDuration=PT0.092028S, maxBatchProcessingThroughput=0, currentLogFileName=[/u04/oradata/naument1/redo_03b.log, /u02/oradata/naument1/redo_03a.log], minLogFilesMined=2, maxLogFilesMined=2, redoLogStatus=[/u02/oradata/naument1/redo_07a.log | ACTIVE, /u04/oradata/naument1/redo_07b.log | ACTIVE, /u02/oradata/naument1/redo_06a.log | ACTIVE, /u04/oradata/naument1/redo_06b.log | ACTIVE, /u02/oradata/naument1/redo_05a.log | ACTIVE, /u04/oradata/naument1/redo_05b.log | ACTIVE, /u04/oradata/naument1/redo_01b.log | ACTIVE, /u04/oradata/naument1/redo_04b.log | ACTIVE, /u02/oradata/naument1/redo_04a.log | ACTIVE, /u02/oradata/naument1/redo_01a.log | ACTIVE, /u02/oradata/naument1/redo_02a.log | ACTIVE, /u04/oradata/naument1/redo_02b.log | ACTIVE, /u04/oradata/naument1/redo_03b.log | CURRENT, /u02/oradata/naument1/redo_03a.log | CURRENT], switchCounter=0, batchSize=20000, millisecondToSleepBetweenMiningQuery=3000, keepTransactionsDuration=PT0S, networkConnectionProblemsCounter0, batchSizeDefault=20000, batchSizeMin=1000, batchSizeMax=100000, sleepTimeDefault=1000, sleepTimeMin=0, sleepTimeMax=3000, sleepTimeIncrement=200, totalParseTime=PT0S, totalStartLogMiningSessionDuration=PT3M48.392022S, lastStartLogMiningSessionDuration=PT0.008584S, maxStartLogMiningSessionDuration=PT14.597725S, totalProcessTime=PT8M42.619821S, minBatchProcessTime=PT0.084316S, maxBatchProcessTime=PT18.575361S, totalResultSetNextTime=PT13.745653S, lagFromTheSource=DurationPT1.186127S, maxLagFromTheSourceDuration=PT27.414294S, minLagFromTheSourceDuration=PT0.125003S, lastCommitDuration=PT0.000001S, maxCommitDuration=PT0.001004S, activeTransactions=0, rolledBackTransactions=1542, oversizedTransactions=0, committedTransactions=152689, abandonedTransactionIds={}, rolledbackTransactionIds={0d00030026db2900=0d00030026db2900, 11001600d3322b00=11001600d3322b00, 0b000600b40f2f00=0b000600b40f2f00, 050017003b732b00=050017003b732b00, 0e0010005b3e2900=0e0010005b3e2900, 0f0019003cf62900=0f0019003cf62900, 0300190099b62800=0300190099b62800, 07000000feca2d00=07000000feca2d00, 0b001500f80b2f00=0b001500f80b2f00, 0c002000a1582b00=0c002000a1582b00}, registeredDmlCount=0, committedDmlCount=0, errorCount=0, warningCount=0, scnFreezeCount=0, unparsableDdlCount=0, miningSessionUserGlobalAreaMemory=34402280, miningSessionUserGlobalAreaMaxMemory=55972872, miningSessionProcessGlobalAreaMemory=105102152, miningSessionProcessGlobalAreaMaxMemory=122403656} [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource]
2023-08-25 13:49:23,250 INFO Oracle|vk_nau33|streaming Offsets: OracleOffsetContext [scn=289931545938, commit_scn=["289931545938:1:02000d0072ce2700"]] [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource]
2023-08-25 13:49:23,250 INFO Oracle|vk_nau33|streaming Finished streaming [io.debezium.pipeline.ChangeEventSourceCoordinator]
2023-08-25 13:49:23,250 INFO Oracle|vk_nau33|streaming Connected metrics set to 'false' [io.debezium.pipeline.ChangeEventSourceCoordinator]
2023-08-25 13:49:23,251 INFO || SignalProcessor stopped [io.debezium.pipeline.signal.SignalProcessor]
2023-08-25 13:49:23,274 INFO || Connection gracefully closed [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:49:23,274 INFO || [Producer clientId=vk_nau33-schemahistory] Closing the Kafka producer with timeoutMillis = 30000 ms. [org.apache.kafka.clients.producer.KafkaProducer]
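
The "Streaming metrics dump" above is the toString of Debezium's OracleStreamingChangeEventSourceMetrics, emitted as a single key=value line when the LogMiner streaming loop shuts down. For quick inspection it can be pulled apart with plain text processing; the snippet below is illustrative only (the field names are copied from the dump above, the file name is hypothetical, and the exact format may differ between Debezium versions):

import re

# Scalar fields of interest, as they appear in the dump above.
FIELDS = (
    "currentScn", "committedScn", "offsetScn", "totalProcessedRows",
    "committedTransactions", "rolledBackTransactions", "lagFromTheSource",
)

def extract_streaming_metrics(line: str) -> dict:
    # Each field is printed as name=value, terminated by a comma or a closing brace.
    out = {}
    for field in FIELDS:
        match = re.search(rf"\b{field}=([^,}}]+)", line)
        if match:
            out[field] = match.group(1)
    return out

# Example (hypothetical log file path):
# print(extract_streaming_metrics(open("connect.log", encoding="utf-8").read()))
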
2023-08-25 13:49:23,275 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:49:23,275 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:49:23,275 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:49:23,275 INFO || App info kafka.producer for vk_nau33-schemahistory unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:49:23,276 INFO || [Producer clientId=connector-producer-vk_nau33_src-0] Closing the Kafka producer with timeoutMillis = 30000 ms. [org.apache.kafka.clients.producer.KafkaProducer]
2023-08-25 13:49:23,277 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:49:23,277 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:49:23,277 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:49:23,277 INFO || App info kafka.producer for connector-producer-vk_nau33_src-0 unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:49:23,277 INFO || App info kafka.admin.client for connector-adminclient-vk_nau33_src-0 unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:49:23,278 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:49:23,278 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:49:23,278 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:49:23,280 INFO || [Worker clientId=connect-1, groupId=naument] Finished stopping tasks in preparation for rebalance [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:23,281 INFO || [Worker clientId=connect-1, groupId=naument] Finished flushing status backing store in preparation for rebalance [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:23,281 INFO || [Worker clientId=connect-1, groupId=naument] Joined group at generation 8 with protocol version 2 and got assignment: Assignment{error=0, leader='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', leaderUrl='http://172.18.0.10:8083/', offset=2362, connectorIds=[], taskIds=[], revokedConnectorIds=[vk_nau33_src], revokedTaskIds=[vk_nau33_src-0], delay=0} with rebalance delay: 0 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:23,282 INFO || [Worker clientId=connect-1, groupId=naument] Starting connectors and tasks using config offset 2362 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:23,282 INFO || [Worker clientId=connect-1, groupId=naument] Finished starting connectors and tasks [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:23,282 INFO || [Worker clientId=connect-1, groupId=naument] Rebalance started [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:23,282 INFO || [Worker clientId=connect-1, groupId=naument] (Re-)joining group [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:23,283 INFO || [Worker clientId=connect-1, groupId=naument] Successfully joined group with generation Generation{generationId=9, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:23,288 INFO || [Worker clientId=connect-1, groupId=naument] Successfully synced group in generation Generation{generationId=9, memberId='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', protocol='sessioned'} [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:49:23,289 INFO || [Worker clientId=connect-1, groupId=naument] Joined group at generation 9 with protocol version 2 and got assignment: Assignment{error=0, leader='connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1', leaderUrl='http://172.18.0.10:8083/', offset=2362, connectorIds=[], taskIds=[], revokedConnectorIds=[], revokedTaskIds=[], delay=0} with rebalance delay: 0 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:23,289 INFO || [Worker clientId=connect-1, groupId=naument] Starting connectors and tasks using config offset 2362 [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:49:23,289 INFO || [Worker clientId=connect-1, groupId=naument] Finished starting connectors and tasks [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:51:06,269 INFO || Kafka Connect stopping [org.apache.kafka.connect.runtime.Connect]
2023-08-25 13:51:06,269 INFO || Stopping REST server [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:51:06,273 INFO || Stopped http_8083@7a47f0be{HTTP/1.1, (http/1.1)}{0.0.0.0:8083} [org.eclipse.jetty.server.AbstractConnector]
2023-08-25 13:51:06,273 INFO || node0 Stopped scavenging [org.eclipse.jetty.server.session]
2023-08-25 13:51:06,274 INFO || REST server stopped [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:51:06,274 INFO || [Worker clientId=connect-1, groupId=naument] Herder stopping [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:51:06,275 INFO || [Worker clientId=connect-1, groupId=naument] Stopping connectors and tasks that are still assigned to this worker. [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:51:06,275 INFO || [Worker clientId=connect-1, groupId=naument] Member connect-1-0a3238d9-363b-448f-a550-6168c7edb8d1 sending LeaveGroup request to coordinator broker3:29092 (id: 2147483644 rack: null) due to the consumer is being closed [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:51:06,275 INFO || [Worker clientId=connect-1, groupId=naument] Resetting generation and member id due to: consumer pro-actively leaving the group [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:51:06,275 WARN || [Worker clientId=connect-1, groupId=naument] Close timed out with 1 pending requests to coordinator, terminating client connections [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:51:06,275 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,275 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,275 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,276 INFO || App info kafka.connect for connect-1 unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:51:06,276 INFO || Stopping KafkaBasedLog for topic naument-status [org.apache.kafka.connect.util.KafkaBasedLog]
2023-08-25 13:51:06,276 INFO || [Producer clientId=naument--statuses] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms. [org.apache.kafka.clients.producer.KafkaProducer]
2023-08-25 13:51:06,277 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,277 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,277 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,277 INFO || App info kafka.producer for naument--statuses unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:51:06,277 INFO || [Consumer clientId=naument--statuses, groupId=naument] Resetting generation and member id due to: consumer pro-actively leaving the group [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator]
2023-08-25 13:51:06,277 INFO || [Consumer clientId=naument--statuses, groupId=naument] Request joining group due to: consumer pro-actively leaving the group [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator]
2023-08-25 13:51:06,742 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,742 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,742 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,743 INFO || App info kafka.consumer for naument--statuses unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:51:06,743 INFO || Stopped KafkaBasedLog for topic naument-status [org.apache.kafka.connect.util.KafkaBasedLog]
2023-08-25 13:51:06,743 INFO || Closing KafkaConfigBackingStore [org.apache.kafka.connect.storage.KafkaConfigBackingStore]
2023-08-25 13:51:06,744 INFO || Stopping KafkaBasedLog for topic naument-config [org.apache.kafka.connect.util.KafkaBasedLog]
2023-08-25 13:51:06,744 INFO || [Producer clientId=naument--configs] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms. [org.apache.kafka.clients.producer.KafkaProducer]
2023-08-25 13:51:06,745 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,745 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,745 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:06,745 INFO || App info kafka.producer for naument--configs unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:51:06,745 INFO || [Consumer clientId=naument--configs, groupId=naument] Resetting generation and member id due to: consumer pro-actively leaving the group [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator]
2023-08-25 13:51:06,745 INFO || [Consumer clientId=naument--configs, groupId=naument] Request joining group due to: consumer pro-actively leaving the group [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator]
2023-08-25 13:51:07,132 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,132 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,132 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,133 INFO || App info kafka.consumer for naument--configs unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:51:07,133 INFO || Stopped KafkaBasedLog for topic naument-config [org.apache.kafka.connect.util.KafkaBasedLog]
2023-08-25 13:51:07,133 INFO || Closed KafkaConfigBackingStore [org.apache.kafka.connect.storage.KafkaConfigBackingStore]
2023-08-25 13:51:07,133 INFO || Worker stopping [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:51:07,134 INFO || Stopping KafkaOffsetBackingStore [org.apache.kafka.connect.storage.KafkaOffsetBackingStore]
2023-08-25 13:51:07,134 INFO || Stopping KafkaBasedLog for topic naument-offset [org.apache.kafka.connect.util.KafkaBasedLog]
2023-08-25 13:51:07,134 INFO || [Producer clientId=naument--offsets] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms. [org.apache.kafka.clients.producer.KafkaProducer]
2023-08-25 13:51:07,135 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,135 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,135 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,135 INFO || App info kafka.producer for naument--offsets unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:51:07,135 INFO || [Consumer clientId=naument--offsets, groupId=naument] Resetting generation and member id due to: consumer pro-actively leaving the group [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator]
2023-08-25 13:51:07,135 INFO || [Consumer clientId=naument--offsets, groupId=naument] Request joining group due to: consumer pro-actively leaving the group [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator]
2023-08-25 13:51:07,559 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,559 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,559 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,560 INFO || App info kafka.consumer for naument--offsets unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:51:07,560 INFO || Stopped KafkaBasedLog for topic naument-offset [org.apache.kafka.connect.util.KafkaBasedLog]
2023-08-25 13:51:07,560 INFO || Stopped KafkaOffsetBackingStore [org.apache.kafka.connect.storage.KafkaOffsetBackingStore]
2023-08-25 13:51:07,560 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,560 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,560 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,560 INFO || App info kafka.connect for 172.18.0.10:8083 unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:51:07,560 INFO || Worker stopped [org.apache.kafka.connect.runtime.Worker]
2023-08-25 13:51:07,561 INFO || App info kafka.admin.client for naument--shared-admin unregistered [org.apache.kafka.common.utils.AppInfoParser]
2023-08-25 13:51:07,562 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,562 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,562 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics]
2023-08-25 13:51:07,562 INFO || [Worker clientId=connect-1, groupId=naument] Herder stopped [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:51:07,564 INFO || [Worker clientId=connect-1, groupId=naument] Herder stopped [org.apache.kafka.connect.runtime.distributed.DistributedHerder]
2023-08-25 13:51:07,564 INFO || Kafka Connect stopped [org.apache.kafka.connect.runtime.Connect]
2023-08-25 13:56:27,409 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState]
2023-08-25 13:56:27,409 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:27,409 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection]
2023-08-25 13:56:27,409 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:27,409 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint]
2023-08-25 13:56:27,409 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:27,409 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection]
2023-08-25 13:56:27,409 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState]
2023-08-25 13:56:27,409 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection]
2023-08-25 13:56:27,409 DEBUG || ReservedThread@73c3e6ea{RUNNING,thread=Thread[qtp974320615-122,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor]
2023-08-25 13:56:27,409 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:27,409 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection]
2023-08-25 13:56:27,409 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:27,409 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:27,409 DEBUG || ReservedThread@73c3e6ea{RESERVED,thread=Thread[qtp974320615-122,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor]
2023-08-25 13:56:27,409 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState]
2023-08-25 13:56:27,409 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel]
2023-08-25 13:56:27,409 DEBUG || onCompleted for /connectors/vk_nau27_sink/topics written=75 [org.eclipse.jetty.server.HttpChannel]
2023-08-25 13:56:27,409 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=17]@2023-08-25T13:56:27.409381+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor]
2023-08-25 13:56:27,409 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint]
2023-08-25 13:56:27,409 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:27 +0000] "GET /connectors/vk_nau27_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer]
2023-08-25 13:56:27,409 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=17]@2023-08-25T13:56:27.409495+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill]
2023-08-25 13:56:27,409 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser]
2023-08-25 13:56:27,409 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser]
2023-08-25 13:56:27,409 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint]
2023-08-25 13:56:27,409 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint]
2023-08-25 13:56:27,409 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:27,409 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:27,409 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:27,409 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:27,409 DEBUG || Key interests
updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=SPACE1,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,409 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,409 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,409 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,409 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,409 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,409 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:27,409 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || 
HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,409 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,409 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,409 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} GET //naument:8083/connectors/vk_nau27_src/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,410 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:27,410 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,410 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,410 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,410 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,410 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,410 DEBUG || handle //naument:8083/connectors/vk_nau27_src/topics HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,410 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,410 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,410 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:27,410 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,410 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,410 DEBUG || REQUEST GET /connectors/vk_nau27_src/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:27,410 DEBUG || scope null||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:27,410 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,410 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,410 DEBUG || context=||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:27,410 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:27,410 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:27,410 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,410 DEBUG || servlet ||/connectors/vk_nau27_src/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:27,410 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:27,410 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,410 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:27,410 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:27,410 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,410 DEBUG || Queued change lazy=false 
org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,410 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,410 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,410 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,410 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,410 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,410 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,410 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,410 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:27,410 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:27,410 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,410 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,410 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,410 DEBUG || write(array HeapByteBuffer@6d4bb9b6[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:27,411 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=85 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:27,411 DEBUG || sendResponse info=null content=HeapByteBuffer@6def79d7[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 
13:56:27,411 DEBUG || COMMIT for /connectors/vk_nau27_src/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:27 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,411 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5509345c] (null,[p=0,l=85,c=8192,r=85],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,411 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=85} last=true content=HeapByteBuffer@6def79d7[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:27,411 DEBUG || Date: Fri, 25 Aug 2023 10:56:27 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:27,411 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:27,411 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5509345c] ([p=0,l=141,c=8192,r=141],[p=0,l=85,c=8192,r=85],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,411 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=141,c=8192,r=141]={<<>>json\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@6def79d7[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:27,411 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:27,411 DEBUG || flushed 226 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,411 DEBUG || Flushed=true written=226 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:27,411 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:27,411 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5509345c] ([p=141,l=141,c=8192,r=0],[p=85,l=85,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,411 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:27,411 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 
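The write/COMMIT entries above show the worker answering GET /connectors/vk_nau27_src/topics with an 85-byte JSON body of the form {"vk_nau27_src":{"topics":[...]}}. That is the standard Kafka Connect REST endpoint for listing the topics a connector has been writing to; the sketch below (Python, assuming only the worker address naument:8083 and the connector name seen in the log) reproduces the same query from a client:

import json
import urllib.request

# Worker address and connector name as they appear in the log above;
# adjust both for a different deployment.
CONNECT_URL = "http://naument:8083"
CONNECTOR = "vk_nau27_src"

# GET /connectors/<name>/topics returns {"<name>": {"topics": [...]}}
with urllib.request.urlopen(f"{CONNECT_URL}/connectors/{CONNECTOR}/topics") as resp:
    body = json.load(resp)

print(body[CONNECTOR]["topics"])
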
2023-08-25 13:56:27,411 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:27,411 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:27,411 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:27,411 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,411 DEBUG || complete(org.eclipse.jetty.util.Callback$3@2364373d) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:27,411 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:27,411 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:27,411 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:27,411 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,411 DEBUG || onCompleted for /connectors/vk_nau27_src/topics written=85 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,411 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:27 +0000] "GET /connectors/vk_nau27_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:27,411 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,411 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,411 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,411 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,411 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,411 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,411 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,411 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,411 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,411 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:27,411 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,411 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,411 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:27,411 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,411 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,412 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:27,412 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 
...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,412 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,412 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,412 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,412 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:27,412 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,412 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,412 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:27,412 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:27,412 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,412 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,412 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,412 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,412 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,412 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:27,412 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,412 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,412 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,412 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:27,412 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:27,412 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:27,412 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,412 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:27,689 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:27,690 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:27,690 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:27,690 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:27,690 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
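The repeated ChangeEventQueue DEBUG entries above only show the Debezium source task idling between polls: no new change events arrived, so it sleeps and polls again. If this DEBUG/TRACE verbosity is not wanted while the connector is otherwise healthy, recent Kafka Connect versions expose a dynamic logging endpoint (PUT /admin/loggers/<logger>). A minimal Java sketch follows, assuming the worker's REST interface is reachable at http://localhost:8083 (adjust to your deployment):

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

/**
 * Sketch only: raise the level of the chatty Debezium queue logger at runtime
 * via Kafka Connect's dynamic logging REST endpoint (PUT /admin/loggers/<logger>).
 * The Connect REST address below is an assumption, not taken from the log.
 */
public class RaiseLoggerLevel {
    public static void main(String[] args) throws Exception {
        String connectRest = "http://localhost:8083";                      // assumption: Connect worker REST URL
        String logger = "io.debezium.connector.base.ChangeEventQueue";     // logger name seen in the log above

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(connectRest + "/admin/loggers/" + logger))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body()); // lists the loggers that were changed
    }
}
```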
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:27,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=108, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=77) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,692 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 990 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=109, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=77, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,692 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,692 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 990 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,704 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1879, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,704 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,704 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=622) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,704 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421672 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,704 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1882, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=622, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,704 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,704 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421672 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,711 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:27,711 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:27,711 DEBUG || [Worker clientId=connect-1, 
groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=119, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,711 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=119, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,711 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:27,755 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1871, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,755 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,755 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,755 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,755 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,755 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,755 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,755 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
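For context, the FETCH entries above show the sink task's consumer (group connect-vk_nau27_sink) holding an incremental fetch session on vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position 2001, while the Connect worker heartbeats normally in group naument. To verify independently that the topic actually carries records, a throwaway standalone consumer can be pointed at it. This is a sketch only (not the Connect runtime); the broker addresses are simply the hostnames that appear in the log, and the group id is made up for this check:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

/** Peek at the change topic the sink connector's consumer is fetching above. */
public class PeekChangeTopic {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "debug-peek");            // assumption: throwaway group id
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
            for (int i = 0; i < 10; i++) {                                  // a few short polls are enough for a peek
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("partition=%d offset=%d key=%s%n",
                            record.partition(), record.offset(), record.key());
                }
            }
        }
    }
}
```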
2023-08-25 13:56:27,755 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,755 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=621) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147387497 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1874, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=621, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147387496 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,786 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1880, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,786 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,786 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,786 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,786 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,786 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,786 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,786 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,787 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=624) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,787 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,787 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,787 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421590 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,787 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1883, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=624, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,787 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,787 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,787 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,787 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,787 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,787 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421589 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,793 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=632, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,793 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,793 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,793 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,793 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=626) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,793 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,793 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,793 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147420560 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,793 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=633, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=626, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,793 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,793 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147420560 
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1881, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=625) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421561 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1884, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=625, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421561 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1872, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=621) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147387410 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,842 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1875, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=621, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,842 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147387410 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1873, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=621) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147387404 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:27,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1876, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=621, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:27,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147387403 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,190 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,190 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,191 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:28,191 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:28,191 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,191 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,193 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=109, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,193 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,193 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,193 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,193 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=78) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,193 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,193 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,193 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 489 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,193 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=110, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=78, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,193 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,193 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 489 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,205 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1882, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,205 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,205 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=623) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,205 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421171 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,205 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1885, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=623, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,205 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,205 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421171 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,255 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1874, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,255 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,255 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,255 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=622) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386997 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1877, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=622, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386996 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,287 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1883, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,287 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,287 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=625) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,287 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421089 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,287 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1886, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=625, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,287 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,287 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421089 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,293 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=633, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,293 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,294 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,294 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,294 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=627) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,294 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,294 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,294 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147420060 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,294 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=634, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=627, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,294 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,294 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147420059 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1884, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=626) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421060 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1887, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=626, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147421060 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1875, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=622) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386909 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,343 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1878, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=622, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,343 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386909 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1876, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=622) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386902 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1879, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=622, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386902 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,683 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:28,683 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:28,683 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=111, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,683 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,683 
TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=111, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:28,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,691 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,691 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,691 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,691 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,691 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,691 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,691 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,691 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,691 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,691 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,692 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,692 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:28,692 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:28,692 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,692 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:28,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=110, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=79) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,694 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2989 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=112, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=79, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,694 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,694 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2989 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,706 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1885, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,706 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,706 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,706 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,706 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,706 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,706 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,706 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,706 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=624) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,706 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,706 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,706 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420670 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,706 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1888, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=624, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,707 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,707 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,707 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,707 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,707 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,707 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420669 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1877, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=623) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386496 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1880, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=623, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
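The long run of "Skipping fetch" TRACE lines here and below is expected behaviour rather than an error: the fetcher sends at most one fetch to a given broker at a time, so every partition whose leader already has an in-flight request is skipped until that broker's response arrives, and at TRACE level this is logged once per partition per poll. If the verbosity is unwanted, the log level of the Kafka client packages can be raised on the running worker through Connect's admin REST endpoint (PUT /admin/loggers/{logger}, served on the worker's regular REST listener by default). A sketch with the JDK HTTP client, assuming the worker is reachable as naument:8083 and INFO is the desired level:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class QuietKafkaClientLogging {
        public static void main(String[] args) throws Exception {
            HttpClient client = HttpClient.newHttpClient();

            // Raise the Kafka client loggers from TRACE/DEBUG to INFO on the running worker.
            // "naument:8083" mirrors the worker address seen in this log; adjust as needed.
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://naument:8083/admin/loggers/org.apache.kafka.clients"))
                    .header("Content-Type", "application/json")
                    .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                    .build();

            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode() + " " + response.body());
        }
    }

The change applies only to the running worker process and is not persisted across a restart; a permanent change belongs in the worker's Log4j configuration.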
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386496 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:28,788 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1886, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[])
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,788 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,788 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=626) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,788 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420588 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,788 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1889, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=626, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,788 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,788 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420588 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,794 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=634, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,794 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,794 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,794 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,794 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=628) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,794 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,794 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,794 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147419559 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,794 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=635, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=628, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,794 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,794 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147419559 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1887, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=627) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420560 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1890, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=627, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420559 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,844 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1878, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,844 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,844 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,844 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=623) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,845 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386408 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,845 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1881, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=623, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,845 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386407 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1879, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=623) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:28,851 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386402 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:28,851 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1882, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=623, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:28,851 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:28,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147386401 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:28,866 TRACE || [Producer clientId=vk_nau27-schemahistory] The number of partitions is too small: available=1, all=1, not using adaptive for topic vk_nau27_src.schema-changes [org.apache.kafka.clients.producer.internals.BuiltInPartitioner]
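The single BuiltInPartitioner TRACE line from the vk_nau27-schemahistory producer is informational, not a problem: adaptive partitioning only applies to topics with more than one partition, and a Debezium database schema history topic such as vk_nau27_src.schema-changes is expected to have exactly one partition. If you want to confirm the partition count, a small Admin API check along these lines works (a sketch; the class name is invented and the broker list is taken from this log):

    import java.util.List;
    import java.util.Map;
    import java.util.Properties;

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.TopicDescription;

    public class SchemaHistoryTopicCheck {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Broker list taken from the log; adjust to your environment.
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");

            try (Admin admin = Admin.create(props)) {
                String topic = "vk_nau27_src.schema-changes";
                Map<String, TopicDescription> descriptions =
                        admin.describeTopics(List.of(topic)).allTopicNames().get();
                // A single partition is the expected layout for a schema history topic,
                // which is exactly why the producer reports "not using adaptive" for it.
                System.out.println(topic + " has "
                        + descriptions.get(topic).partitions().size() + " partition(s)");
            }
        }
    }

The same check can also be done with the kafka-topics command-line tool if running a JVM snippet is inconvenient.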
-1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,943 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1534/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,944 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=15]@2023-08-25T13:56:28.94404+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,944 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=15]@2023-08-25T13:56:28.944162+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1535/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,944 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=15]@2023-08-25T13:56:28.944225+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,944 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1535/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,944 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=16]@2023-08-25T13:56:28.944342+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,944 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,944 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,944 DEBUG || 
HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1535/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,944 DEBUG || filled 129 HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>> applicat...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,944 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} filled 129 HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>> applicat...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,944 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>> applicat...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,944 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>> applicat...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1535/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE 
rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,944 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,944 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,944 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,945 DEBUG || REQUEST for //naument:8083/connectors on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} GET //naument:8083/connectors HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,945 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=1} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,945 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,945 DEBUG || handle //naument:8083/connectors HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,945 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,945 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,945 DEBUG || REQUEST GET /connectors on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,945 DEBUG || scope null||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,945 DEBUG || context=||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,945 DEBUG || Entering scope 
org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,945 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,945 DEBUG || servlet ||/connectors -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,945 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,947 DEBUG || write(array HeapByteBuffer@1b183919[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,947 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=32 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,947 DEBUG || sendResponse info=null content=HeapByteBuffer@614b1cb4[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,947 DEBUG || COMMIT for /connectors on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,947 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7c4cc592] (null,[p=0,l=32,c=8192,r=32],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,947 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=32} last=true content=HeapByteBuffer@614b1cb4[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,947 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,947 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,947 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7c4cc592] ([p=0,l=141,c=8192,r=141],[p=0,l=32,c=8192,r=32],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,947 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=141,c=8192,r=141]={<<>>json\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@614b1cb4[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,947 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,947 DEBUG || flushed 173 
SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,947 DEBUG || Flushed=true written=173 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,947 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,947 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7c4cc592] ([p=141,l=141,c=8192,r=0],[p=32,l=32,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,947 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,947 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,947 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,947 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,947 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,947 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,947 DEBUG || complete(org.eclipse.jetty.util.Callback$3@714e43b4) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,947 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,947 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,947 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,947 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,947 DEBUG || onCompleted for /connectors 
written=32 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,947 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors HTTP/1.1" 200 32 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,947 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,947 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,947 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,948 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,948 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,948 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,948 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,948 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,948 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,948 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,948 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,948 DEBUG || releaseRequestBuffer 
HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,948 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,948 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,948 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,948 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,948 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,948 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,948 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,948 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,948 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,948 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,948 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,948 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,948 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,948 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,948 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,948 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,948 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,948 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,948 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,948 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,948 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,948 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 
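The Jetty entries up to this point trace one full round trip on the connection from 10.0.2.141:50638: the worker's REST server parses GET /connectors, dispatches it through the Jersey servlet, and commits a 200 response whose 32-byte body is ["vk_nau27_src","vk_nau27_sink"], matching the access-log line "GET /connectors HTTP/1.1" 200 32. A minimal client-side sketch of the same call follows; it assumes only the host naument:8083 and the Accept: application/json header visible in the parsed request, uses the standard java.net.http client (Java 11+), and the class name ConnectorsProbe is purely illustrative.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ConnectorsProbe {
    public static void main(String[] args) throws Exception {
        // Same call the log traces above: GET /connectors against the worker at naument:8083,
        // with the Accept: application/json header recorded by the HttpParser HEADER transitions.
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://naument:8083/connectors"))
                .header("Accept", "application/json")
                .GET()
                .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        // Per the entries above, this returned 200 with the body ["vk_nau27_src","vk_nau27_sink"].
        System.out.println(response.statusCode() + " " + response.body());
    }
}

Against a live worker in the state shown here, this should reproduce the same 200/32-byte response that the RestServer access log records.
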
2023-08-25 13:56:28,948 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,948 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,948 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,948 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1538/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1538/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,949 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,949 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1538/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,949 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1537/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1537/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,949 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=1,epc=32]@2023-08-25T13:56:28.949317+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,949 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1537/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,949 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=17]@2023-08-25T13:56:28.949457+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,949 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=32]@2023-08-25T13:56:28.949542+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,949 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=32]@2023-08-25T13:56:28.949461+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1539/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,949 DEBUG || 
EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=32]@2023-08-25T13:56:28.949622+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,949 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=17]@2023-08-25T13:56:28.949572+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1537/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,949 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=17]@2023-08-25T13:56:28.949632+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,949 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1539/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,949 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1537/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,949 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=18]@2023-08-25T13:56:28.949752+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,949 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1539/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,949 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1537/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,949 DEBUG || filled 142 HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,949 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,949 DEBUG || filled 143 HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\njso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,949 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} filled 142 HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,949 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,949 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 143 HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\njso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,949 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parse 
HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,950 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\njso...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,950 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,949 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1539/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,950 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,950 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\njso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,949 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,950 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 
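Two further connections (10.0.2.141:50662 and 10.0.2.141:50652) are being parsed in parallel at this point; as the subsequent entries show, they carry the per-connector lookups GET /connectors/vk_nau27_src and GET /connectors/vk_nau27_sink, answered with 1528- and 905-byte JSON bodies beginning {"name":"vk_nau27_src",... and {"name":"vk_nau27_sink",.... A sketch of the same per-connector queries is below; the connector names and host come from the log, while the class name ConnectorInfoProbe and the loop structure are illustrative only.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.List;

public class ConnectorInfoProbe {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // The two connector names returned by GET /connectors in the preceding entries.
        for (String name : List.of("vk_nau27_src", "vk_nau27_sink")) {
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://naument:8083/connectors/" + name))
                    .header("Accept", "application/json")
                    .GET()
                    .build();
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            // The log above records 200 responses of 1528 and 905 bytes for these two names.
            System.out.println(name + " -> " + response.statusCode()
                    + " (" + response.body().length() + " chars)");
        }
    }
}
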
2023-08-25 13:56:28,950 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,950 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} GET //naument:8083/connectors/vk_nau27_src HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,950 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,950 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,950 DEBUG || handle //naument:8083/connectors/vk_nau27_src HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,950 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,950 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,950 DEBUG || REQUEST GET /connectors/vk_nau27_src on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,950 DEBUG || scope null||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,950 DEBUG || 
context=||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,950 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,950 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,950 DEBUG || servlet ||/connectors/vk_nau27_src -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,950 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,951 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,951 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:28,951 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:28,950 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,951 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,951 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,951 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,951 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,951 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,951 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,951 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,951 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} GET //naument:8083/connectors/vk_nau27_sink HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,951 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} parsed true HttpParser{s=CONTENT,0 of -1} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,952 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,952 DEBUG || handle //naument:8083/connectors/vk_nau27_sink HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,952 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960988951 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,952 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,952 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 742957ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,952 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,952 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,952 DEBUG || REQUEST GET /connectors/vk_nau27_sink on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,952 DEBUG || scope null||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,952 DEBUG || context=||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,952 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,952 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,952 DEBUG || servlet ||/connectors/vk_nau27_sink -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} 
[org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,952 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,952 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,952 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:28,952 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:28,952 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,952 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,952 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,953 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960988952 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,953 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,953 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 742956ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,953 DEBUG || write(array HeapByteBuffer@3283f0f7[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,953 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=905 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,953 DEBUG || sendResponse info=null content=HeapByteBuffer@b5324a0[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,953 DEBUG || COMMIT for /connectors/vk_nau27_sink on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,953 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6d2035dc] 
(null,[p=0,l=905,c=8192,r=905],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,953 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=905} last=true content=HeapByteBuffer@b5324a0[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,953 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,953 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,953 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6d2035dc] ([p=0,l=142,c=8192,r=142],[p=0,l=905,c=8192,r=905],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,953 DEBUG || write(array HeapByteBuffer@3f47cd5c[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,953 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@b5324a0[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,953 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1528 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,953 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,954 DEBUG || sendResponse info=null content=HeapByteBuffer@a0f54e5[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,954 DEBUG || COMMIT for /connectors/vk_nau27_src on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=4} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,954 DEBUG || flushed 1047 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,954 DEBUG || Flushed=true written=1047 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,954 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,954 DEBUG || generate: NEED_HEADER for 
SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@f6b7004] (null,[p=0,l=1528,c=8192,r=1528],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,954 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6d2035dc] ([p=142,l=142,c=8192,r=0],[p=905,l=905,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,954 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1528} last=true content=HeapByteBuffer@a0f54e5[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,954 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,954 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,954 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,954 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@f6b7004] ([p=0,l=143,c=8192,r=143],[p=0,l=1528,c=8192,r=1528],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,954 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\njso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@a0f54e5[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,954 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,954 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,954 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,954 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,954 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,954 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,954 DEBUG || complete(org.eclipse.jetty.util.Callback$3@3031eae6) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,954 DEBUG || flushed 1671 
SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=4} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,954 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,954 DEBUG || Flushed=true written=1671 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,954 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,954 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,954 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,954 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@f6b7004] ([p=143,l=143,c=8192,r=0],[p=1528,l=1528,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,954 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,954 DEBUG || onCompleted for /connectors/vk_nau27_sink written=905 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,954 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,954 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_sink HTTP/1.1" 200 905 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,954 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,954 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,954 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,954 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} filled 0 
HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,954 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=4} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,954 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,954 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,954 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,954 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,954 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,954 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,954 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,954 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,955 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,955 DEBUG || complete(org.eclipse.jetty.util.Callback$3@17b11eaf) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,955 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,955 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,955 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,955 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,955 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,955 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,955 DEBUG || onCompleted for /connectors/vk_nau27_src written=1528 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,955 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,955 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,955 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,955 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,955 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_src HTTP/1.1" 200 1528 "-" "ReactorNetty/1.1.6" 5 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,955 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,955 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,955 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,955 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,955 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,955 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,955 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,955 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,955 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,955 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,955 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,955 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,955 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=6/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,955 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,955 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,955 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,955 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,955 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,955 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,955 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,955 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,955 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,955 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,955 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,955 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED 
os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,955 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,955 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,956 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=16]@2023-08-25T13:56:28.955937+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,955 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,956 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,956 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,956 DEBUG || Selector 
sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,956 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,956 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,956 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=16]@2023-08-25T13:56:28.956109+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,956 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,956 DEBUG || ReservedThread@73c3e6ea{RESERVED,thread=Thread[qtp974320615-122,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=16]@2023-08-25T13:56:28.956175+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,956 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,956 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,956 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,956 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,956 DEBUG || 
EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=17]@2023-08-25T13:56:28.956393+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,956 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,956 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,956 DEBUG || filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,956 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,956 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,956 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,956 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 
13:56:28,956 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,956 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,956 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,956 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,956 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,956 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,956 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} 
[org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,956 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,956 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,956 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,956 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,956 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,957 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,957 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,957 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,957 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=URI,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,957 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,956 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,956 DEBUG || Key interests updated 0 -> 1 on 
SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,957 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,957 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,957 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,957 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,957 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,957 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,957 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,957 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,957 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,957 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=18]@2023-08-25T13:56:28.957403+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,957 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,957 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,957 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=18]@2023-08-25T13:56:28.957468+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,957 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/status HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,957 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,957 DEBUG || fillable 
FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,957 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,957 DEBUG || REQUEST GET /connectors/vk_nau27_sink/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,957 DEBUG || scope null||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,957 DEBUG || context=||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,957 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,957 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,957 DEBUG || servlet ||/connectors/vk_nau27_sink/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,957 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,958 DEBUG || write(array HeapByteBuffer@13d2bcee[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,958 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=169 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,958 DEBUG || sendResponse info=null content=HeapByteBuffer@5e55e82[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,958 DEBUG || COMMIT for /connectors/vk_nau27_sink/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,958 DEBUG || generate: NEED_HEADER for 
SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@195221b6] (null,[p=0,l=169,c=8192,r=169],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,958 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=169} last=true content=HeapByteBuffer@5e55e82[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,958 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,958 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,958 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@195221b6] ([p=0,l=142,c=8192,r=142],[p=0,l=169,c=8192,r=169],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,958 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@5e55e82[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,958 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,959 DEBUG || flushed 311 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,959 DEBUG || Flushed=true written=311 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,959 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,959 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@195221b6] ([p=142,l=142,c=8192,r=0],[p=169,l=169,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,959 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,959 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,959 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,959 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} 
[org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,959 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,959 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,959 DEBUG || complete(org.eclipse.jetty.util.Callback$3@1b8d207e) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,959 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,959 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,959 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,959 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,959 DEBUG || onCompleted for /connectors/vk_nau27_sink/status written=169 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,959 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_sink/status HTTP/1.1" 200 169 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,959 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,959 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,959 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,959 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,959 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,959 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,959 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,959 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,959 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,959 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,959 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,959 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,959 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,959 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,959 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,959 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,959 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,959 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,959 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,959 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,959 
DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,959 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,959 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,960 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,960 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,960 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,960 DEBUG || changeInterests p=false 0->1 for 
SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,960 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,960 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,960 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,957 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,959 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,957 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} 
task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:56:28.957644+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,960 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,960 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,960 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,960 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:56:28.960425+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,960 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,960 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=33]@2023-08-25T13:56:28.960497+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,960 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false 
se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,960 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=33]@2023-08-25T13:56:28.960829+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,960 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=33]@2023-08-25T13:56:28.960887+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,960 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=HEADER,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,960 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,960 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 
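At this point the worker at 10.0.2.144:8083 has parsed the next request line and headers (Accept-Encoding: gzip, User-Agent: ReactorNetty/1.1.6, Host: naument:8083, Accept: application/json) for GET /connectors/vk_nau27_src/status, the standard Kafka Connect REST status endpoint. For reference, a minimal Java 11+ sketch that issues the same call against the worker — assuming the REST listener is reachable as naument:8083 exactly as in the trace; the class name is illustrative only:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ConnectorStatusProbe {
    public static void main(String[] args) throws Exception {
        // Reproduces the request seen in the trace: GET /connectors/vk_nau27_src/status
        // against the Connect REST listener (hostname taken from the Host header in the log).
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://naument:8083/connectors/vk_nau27_src/status"))
                .header("Accept", "application/json")
                .GET()
                .build();
        HttpResponse<String> response =
                client.send(request, HttpResponse.BodyHandlers.ofString());
        // The access log below records this request completing with 200 and a ~170-byte JSON body.
        System.out.println(response.statusCode() + " " + response.body());
    }
}

The INFO access-log lines emitted by org.apache.kafka.connect.runtime.rest.RestServer ("GET /connectors/... HTTP/1.1" 200 169/170) mark the completion of each such status request, while the surrounding DEBUG entries from org.eclipse.jetty.* show the same cycle at the connection, parser, and channel level.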
2023-08-25 13:56:28,960 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=33]@2023-08-25T13:56:28.960696+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,960 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,961 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,960 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,961 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,961 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,961 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,960 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/status on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} GET //naument:8083/connectors/vk_nau27_src/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,961 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE 
rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,961 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,961 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,961 DEBUG || filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,961 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=1} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,961 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,961 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,961 DEBUG || handle //naument:8083/connectors/vk_nau27_src/status HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,961 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,961 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,961 DEBUG || action DISPATCH 
HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,961 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || REQUEST GET /connectors/vk_nau27_src/status on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,961 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || scope null||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,961 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || context=||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,961 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,961 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,961 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || servlet ||/connectors/vk_nau27_src/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,961 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,961 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,961 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} GET //naument:8083/connectors/vk_nau27_sink/config HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,961 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,961 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,961 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/config HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,961 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,961 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,961 DEBUG || REQUEST GET /connectors/vk_nau27_sink/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,961 DEBUG || scope null||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,962 DEBUG || context=||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,962 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,962 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,962 DEBUG || servlet ||/connectors/vk_nau27_sink/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} 
[org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,962 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,962 DEBUG || write(array HeapByteBuffer@d631d04[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,962 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,962 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,962 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=170 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,962 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:28,962 DEBUG || sendResponse info=null content=HeapByteBuffer@424a0227[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,962 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:28,962 DEBUG || COMMIT for /connectors/vk_nau27_src/status on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,962 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@10624558] (null,[p=0,l=170,c=8192,r=170],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,962 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,962 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,962 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=170} last=true content=HeapByteBuffer@424a0227[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,962 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,962 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,962 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,962 DEBUG || 
generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@10624558] ([p=0,l=142,c=8192,r=142],[p=0,l=170,c=8192,r=170],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,962 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\n\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@424a0227[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,962 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,962 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960988962 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,962 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,962 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 742947ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,963 DEBUG || flushed 312 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,963 DEBUG || Flushed=true written=312 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,963 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,963 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@10624558] ([p=142,l=142,c=8192,r=0],[p=170,l=170,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,963 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,963 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,963 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,963 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING 
os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,963 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,963 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,963 DEBUG || complete(org.eclipse.jetty.util.Callback$3@483fe444) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,963 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,963 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,963 DEBUG || write(array HeapByteBuffer@2b447c75[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,963 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,963 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,963 DEBUG || onCompleted for /connectors/vk_nau27_src/status written=170 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,963 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,963 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,963 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,963 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,963 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,963 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,963 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,963 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,963 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,963 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,963 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,963 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,963 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,963 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,963 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,963 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,963 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,963 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,963 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} 
[org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,963 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,963 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,963 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,963 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,964 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,964 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,964 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,964 DEBUG || changeInterests p=false 0->1 for 
SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,964 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,964 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,964 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,963 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=808 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,964 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || sendResponse info=null content=HeapByteBuffer@12d36b15[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,964 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || COMMIT for /connectors/vk_nau27_sink/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT 
Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,964 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@8a95c88] (null,[p=0,l=808,c=8192,r=808],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,964 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=808} last=true content=HeapByteBuffer@12d36b15[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,964 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,964 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,964 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,964 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,964 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,964 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,964 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute 
EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=17]@2023-08-25T13:56:28.964633+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,964 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@8a95c88] ([p=0,l=142,c=8192,r=142],[p=0,l=808,c=8192,r=808],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,964 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@12d36b15[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,964 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,964 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=17]@2023-08-25T13:56:28.964732+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,964 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,964 DEBUG || flushed 950 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,964 DEBUG || Flushed=true written=950 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,964 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,964 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING 
os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,964 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@8a95c88] ([p=142,l=142,c=8192,r=0],[p=808,l=808,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,964 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,964 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,964 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,964 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,965 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,965 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=4} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,965 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,965 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,965 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED 
is=IDLE awp=false se=false i=false al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,965 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,965 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/config on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} GET //naument:8083/connectors/vk_nau27_src/config HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,965 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,965 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,965 DEBUG || handle //naument:8083/connectors/vk_nau27_src/config HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,965 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 
13:56:28,965 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,965 DEBUG || REQUEST GET /connectors/vk_nau27_src/config on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,965 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,965 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,965 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=18]@2023-08-25T13:56:28.965084+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,965 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,965 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=18]@2023-08-25T13:56:28.965635+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,965 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,965 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,965 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,965 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,965 DEBUG || scope null||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,965 DEBUG || complete(org.eclipse.jetty.util.Callback$3@b79edca) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null 
[org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,965 DEBUG || context=||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,965 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,965 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,965 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,965 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,965 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,965 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,965 DEBUG || servlet ||/connectors/vk_nau27_src/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,965 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:56:28.965923+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,965 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,965 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,966 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,966 DEBUG || onCompleted for /connectors/vk_nau27_sink/config written=808 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,966 DEBUG || 
EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:56:28.966002+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,966 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_sink/config HTTP/1.1" 200 808 "-" "ReactorNetty/1.1.6" 5 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,966 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:56:28.966082+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,966 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,966 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:56:28.966158+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,966 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,966 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,966 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,966 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,966 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,966 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,966 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,966 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,966 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,966 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,966 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,966 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 
13:56:28,966 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,966 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,966 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,966 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,966 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,966 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,966 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,966 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,966 DEBUG || scope null||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,966 DEBUG || context=||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,966 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,966 DEBUG || 
sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,966 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,966 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,967 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,967 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:28,966 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=5} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,967 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:28,966 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,967 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=6} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,967 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,967 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,967 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,967 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,967 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,967 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE 
awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,967 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,967 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,967 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,967 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=6} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,967 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960988967 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,967 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,967 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 742942ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,967 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=6} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,967 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,967 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,967 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,967 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,967 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,967 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET 
/conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,967 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,967 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,967 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,968 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,968 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,968 DEBUG || write(array HeapByteBuffer@2a21a886[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,968 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,968 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1431 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,968 DEBUG || sendResponse info=null 
content=HeapByteBuffer@52c6d62b[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,968 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,968 DEBUG || COMMIT for /connectors/vk_nau27_src/config on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,968 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,968 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2d7fc3fa] (null,[p=0,l=1431,c=8192,r=1431],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,968 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,968 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1431} last=true content=HeapByteBuffer@52c6d62b[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,968 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,968 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,968 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2d7fc3fa] ([p=0,l=143,c=8192,r=143],[p=0,l=1431,c=8192,r=1431],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,968 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@52c6d62b[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,968 DEBUG || update 
WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,968 DEBUG || flushed 1574 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,968 DEBUG || Flushed=true written=1574 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,968 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,968 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2d7fc3fa] ([p=143,l=143,c=8192,r=0],[p=1431,l=1431,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,968 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,968 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,968 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,968 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,968 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,968 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,968 DEBUG || complete(org.eclipse.jetty.util.Callback$3@683e1a4b) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,968 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,968 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,968 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,968 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED 
is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,968 DEBUG || onCompleted for /connectors/vk_nau27_src/config written=1431 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,968 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_src/config HTTP/1.1" 200 1431 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,968 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,969 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,969 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,969 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,969 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,969 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,969 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,969 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,969 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,969 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,969 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED 
os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,969 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,969 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,969 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,969 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,969 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,969 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,969 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,969 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,969 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,969 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,969 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,969 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,969 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,969 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,969 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,969 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,969 DEBUG || ReservedThread@73c3e6ea{RUNNING,thread=Thread[qtp974320615-122,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,969 DEBUG || ReservedThread@73c3e6ea{RESERVED,thread=Thread[qtp974320615-122,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,969 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with 
none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,969 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,968 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,968 DEBUG || write(array HeapByteBuffer@161d1c28[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,969 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,969 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=929 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,969 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,969 DEBUG || sendResponse info=null content=HeapByteBuffer@66c5da72[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,969 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,969 DEBUG || COMMIT for 
/connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,969 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,970 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,970 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@327a7dfe] (null,[p=0,l=929,c=8192,r=929],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,970 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=929} last=true content=HeapByteBuffer@66c5da72[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,970 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,970 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,970 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,970 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,970 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,970 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@327a7dfe] ([p=0,l=142,c=8192,r=142],[p=0,l=929,c=8192,r=929],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,970 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,970 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\n\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@66c5da72[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,970 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,970 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,970 DEBUG || flushed 1071 
SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,970 DEBUG || Flushed=true written=1071 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,970 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,970 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@327a7dfe] ([p=142,l=142,c=8192,r=0],[p=929,l=929,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,970 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,970 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,970 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,970 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,970 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,970 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,970 DEBUG || complete(org.eclipse.jetty.util.Callback$3@2da14978) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,970 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,970 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,970 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,970 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,970 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks written=929 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,970 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_sink/tasks HTTP/1.1" 200 929 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,970 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,970 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,970 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,970 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,970 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,970 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,970 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,970 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,970 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,970 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,970 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,971 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=5} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,971 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,971 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,971 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,971 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,971 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,971 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,971 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,971 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,971 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,971 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 
13:56:28,971 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,971 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,971 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,971 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,971 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,970 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,971 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,971 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,971 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,971 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,971 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,971 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,971 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,971 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,971 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=1,epc=34]@2023-08-25T13:56:28.971874+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,971 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=18]@2023-08-25T13:56:28.971901+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,972 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=18]@2023-08-25T13:56:28.971962+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,972 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=34]@2023-08-25T13:56:28.971992+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,972 DEBUG || fillable 
FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,972 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=34]@2023-08-25T13:56:28.971955+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,972 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,972 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=34]@2023-08-25T13:56:28.972085+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,972 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,972 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,972 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=18]@2023-08-25T13:56:28.972017+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,972 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,972 DEBUG || Key interests updated 1 -> 0 on 
SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,972 DEBUG || filled 158 HeapByteBuffer@7b3e78db[p=0,l=158,c=8192,r=158]={<<>>\r\ncontent...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,972 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,972 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,972 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} filled 158 HeapByteBuffer@7b3e78db[p=0,l=158,c=8192,r=158]={<<>>\r\ncontent...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,972 DEBUG || filled 148 HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,972 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=158,c=8192,r=158]={<<>>\r\ncontent...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,972 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=158,c=8192,r=158]={<<>>\r\ncontent...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 
DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,972 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,972 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,972 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks/0/status HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,972 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,972 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,972 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} 
[org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,972 DEBUG || scope null||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,972 DEBUG || context=||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,972 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,972 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,972 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,972 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,973 DEBUG || write(array HeapByteBuffer@5da15b7a[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,973 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,973 DEBUG || sendResponse info=null content=HeapByteBuffer@4f6831d3[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,973 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,973 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4d22e5e3] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,973 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@4f6831d3[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,973 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,973 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,973 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4d22e5e3] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,973 DEBUG || write: 
WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>lication/...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@4f6831d3[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,973 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,973 DEBUG || flushed 198 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,973 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,973 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,973 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4d22e5e3] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,974 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,974 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,974 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,974 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,974 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,974 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,974 DEBUG || complete(org.eclipse.jetty.util.Callback$3@7a8825c7) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,974 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,974 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,974 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,974 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,974 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,974 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_sink/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,974 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,974 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,974 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,974 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,974 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,974 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,974 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,974 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,974 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,974 DEBUG 
|| HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,974 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,974 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,974 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,974 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,974 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,974 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,972 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:56:28.972297+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,974 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,974 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,974 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,974 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,972 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 148 
HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,974 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,974 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,974 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,974 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,974 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,974 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,974 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:56:28.974835+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,974 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,974 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,974 DEBUG || 
HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,974 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:56:28.974916+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,975 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,975 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,974 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,975 DEBUG || ReservedThread@73c3e6ea{RESERVED,thread=Thread[qtp974320615-122,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:56:28.974997+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,975 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,975 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || 
EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:56:28.975116+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,975 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,975 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,975 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,975 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,975 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,975 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,975 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,975 DEBUG || filled 150 HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,975 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 
13:56:28,975 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,975 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,975 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,975 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,975 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG 
|| changeInterests p=false 0->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,975 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,975 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,975 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,975 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} GET //naument:8083/connectors/vk_nau27_sink/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,975 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,975 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,975 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,975 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,975 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,975 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,975 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,976 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,976 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,976 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,975 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,976 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,976 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,976 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,976 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=1} 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,976 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,976 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,976 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,976 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,976 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/topics HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,976 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,976 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,976 DEBUG || scope null||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,976 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,976 DEBUG || context=||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,976 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,976 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,976 DEBUG || servlet ||/connectors/vk_nau27_src/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,976 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,976 DEBUG || REQUEST GET /connectors/vk_nau27_sink/topics on 
HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,976 DEBUG || scope null||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,976 DEBUG || context=||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,976 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,976 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,976 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,976 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:28,976 DEBUG || servlet ||/connectors/vk_nau27_sink/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,976 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,976 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:28,976 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,976 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,977 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,977 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960988977 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,977 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,977 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 742932ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:28,977 DEBUG || write(array 
HeapByteBuffer@1d3eda2d[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,977 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1551 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,977 DEBUG || sendResponse info=null content=HeapByteBuffer@32009646[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,977 DEBUG || write(array HeapByteBuffer@ffcf178[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,977 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,977 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=75 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,977 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3ad74e21] (null,[p=0,l=1551,c=8192,r=1551],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,977 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1551} last=true content=HeapByteBuffer@32009646[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,977 DEBUG || sendResponse info=null content=HeapByteBuffer@639eab8e[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,977 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,977 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,977 DEBUG || COMMIT for /connectors/vk_nau27_sink/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,977 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3ad74e21] ([p=0,l=143,c=8192,r=143],[p=0,l=1551,c=8192,r=1551],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,977 DEBUG || 
generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5cc70bc1] (null,[p=0,l=75,c=8192,r=75],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,977 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>cation/js...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@32009646[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,977 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=75} last=true content=HeapByteBuffer@639eab8e[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,977 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,977 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,977 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,977 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5cc70bc1] ([p=0,l=141,c=8192,r=141],[p=0,l=75,c=8192,r=75],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,978 DEBUG || flushed 1694 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,978 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=141,c=8192,r=141]={<<>>son\r\n\r\n\n\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@639eab8e[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,978 DEBUG || Flushed=true written=1694 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,978 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,978 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,978 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3ad74e21] ([p=143,l=143,c=8192,r=0],[p=1551,l=1551,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,978 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,978 DEBUG || flushed 216 
SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,978 DEBUG || Flushed=true written=216 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,978 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,978 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,978 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,978 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5cc70bc1] ([p=141,l=141,c=8192,r=0],[p=75,l=75,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,978 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,978 DEBUG || complete(org.eclipse.jetty.util.Callback$3@482b17d5) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,978 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks written=1551 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,978 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,978 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_src/tasks HTTP/1.1" 200 1551 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,978 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,978 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,978 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || complete(org.eclipse.jetty.util.Callback$3@405c24e) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,978 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,978 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || 
HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,978 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,978 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,978 DEBUG || onCompleted for /connectors/vk_nau27_sink/topics written=75 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,978 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,978 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,978 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,978 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,978 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,978 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,978 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,978 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,978 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,978 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,978 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,978 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,978 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,978 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,978 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,978 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,978 DEBUG || onRequestComplete 
HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,978 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,978 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,978 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,978 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,979 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,978 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,979 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED 
os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:56:28.979079+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,979 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,979 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,979 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,979 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,979 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} 
task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:56:28.979196+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,979 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:56:28.979158+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,979 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:56:28.979279+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,979 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,979 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,979 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse 
HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,979 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,979 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,979 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,979 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,979 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,979 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,979 DEBUG || filled 157 HeapByteBuffer@7b3e78db[p=0,l=157,c=8192,r=157]={<<>>\n\r\nconten...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,979 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE 
awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 157 HeapByteBuffer@7b3e78db[p=0,l=157,c=8192,r=157]={<<>>\n\r\nconten...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=157,c=8192,r=157]={<<>>\n\r\nconten...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,979 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=157,c=8192,r=157]={<<>>\n\r\nconten...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,979 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,980 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,980 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,980 DEBUG || 
releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,980 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks/0/status HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,980 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,980 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,980 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,980 DEBUG || scope null||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,980 DEBUG || context=||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,980 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,980 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,980 DEBUG || servlet ||/connectors/vk_nau27_src/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,980 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,981 DEBUG || write(array HeapByteBuffer@3d68f04[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,981 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,981 DEBUG || sendResponse info=null content=HeapByteBuffer@ad4d7a8[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true 
callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,981 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,981 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@48db8766] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,981 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@ad4d7a8[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,981 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,981 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,981 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@48db8766] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,981 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=141,c=8192,r=141]={<<>>son\r\n\r\n\n\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@ad4d7a8[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,981 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,981 DEBUG || flushed 198 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,981 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,981 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,981 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@48db8766] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,981 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,981 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, 
async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,981 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,981 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,981 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,979 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,981 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,981 DEBUG || complete(org.eclipse.jetty.util.Callback$3@c88240a) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,981 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,981 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,981 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,981 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,981 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,981 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,981 DEBUG || changeInterests p=false 0->1 for 
SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,981 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,981 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,981 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,981 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,981 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,982 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,982 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,982 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,982 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,982 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,982 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,982 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,982 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,982 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute 
EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=35]@2023-08-25T13:56:28.982306+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,982 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,982 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,982 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,982 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=35]@2023-08-25T13:56:28.982376+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,982 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=35]@2023-08-25T13:56:28.982437+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,982 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN 
is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,982 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=36]@2023-08-25T13:56:28.982489+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:28,982 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,982 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || filled 149 HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,982 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,982 DEBUG || reset 
HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,982 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,982 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,982 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,982 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,982 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,982 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,982 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} 
[org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,983 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:28,983 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,983 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,983 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,983 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,983 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,983 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,982 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,983 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,983 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,983 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,983 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,983 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:28,983 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,983 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,983 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:28,983 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,983 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/topics on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} GET //naument:8083/connectors/vk_nau27_src/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,983 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} parsed 
true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,983 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,983 DEBUG || handle //naument:8083/connectors/vk_nau27_src/topics HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,983 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,983 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,983 DEBUG || REQUEST GET /connectors/vk_nau27_src/topics on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,983 DEBUG || scope null||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,983 DEBUG || context=||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:28,983 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,983 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,983 DEBUG || servlet ||/connectors/vk_nau27_src/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,983 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:28,984 DEBUG || write(array HeapByteBuffer@5323ecb8[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,984 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=85 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,984 DEBUG || sendResponse info=null content=HeapByteBuffer@1bfdbae4[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} 
complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,984 DEBUG || COMMIT for /connectors/vk_nau27_src/topics on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,984 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7a09b1e8] (null,[p=0,l=85,c=8192,r=85],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,984 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=85} last=true content=HeapByteBuffer@1bfdbae4[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,984 DEBUG || Date: Fri, 25 Aug 2023 10:56:28 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,984 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:28,984 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7a09b1e8] ([p=0,l=141,c=8192,r=141],[p=0,l=85,c=8192,r=85],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,984 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>/json\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@1bfdbae4[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,984 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,984 DEBUG || flushed 226 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,984 DEBUG || Flushed=true written=226 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,984 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:28,984 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7a09b1e8] ([p=141,l=141,c=8192,r=0],[p=85,l=85,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,984 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,984 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 
dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:28,984 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:28,984 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,984 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,985 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,985 DEBUG || complete(org.eclipse.jetty.util.Callback$3@5dc4cd58) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:28,985 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,985 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,985 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,985 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,985 DEBUG || onCompleted for /connectors/vk_nau27_src/topics written=85 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,985 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:28 +0000] "GET /connectors/vk_nau27_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:28,985 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,985 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,985 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 
13:56:28,985 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,985 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,985 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,985 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,985 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,985 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,985 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:28,985 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,985 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,985 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:28,985 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,985 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,985 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:28,985 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET 
/conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,985 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:28,985 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,985 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,985 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:28,985 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,985 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:28,985 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:28,985 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest]
2023-08-25 13:56:28,985 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint]
2023-08-25 13:56:28,985 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:28,985 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:28,985 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:28,985 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:28,985 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:28,985 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:28,985 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:28,985 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection]
2023-08-25 13:56:28,985 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor]
2023-08-25 13:56:28,985 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor]
2023-08-25 13:56:28,985 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint]
2023-08-25 13:56:28,985 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:28,985 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector]
2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
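The two REST calls recorded above, GET /connectors/vk_nau27_src/tasks/0/status (the 57-byte JSON body {"id":0,"state":"RUNNING"...} seen in the write buffer) and GET /connectors/vk_nau27_src/topics (the 85-byte body listing the topics the connector has written to), are standard Kafka Connect REST API endpoints served by the worker at naument:8083. As an illustrative sketch only, not part of the captured log, the same two requests could be issued with the JDK's built-in HTTP client roughly as follows; the worker address and connector name are taken from the log, while the class name and everything else are assumptions:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Illustrative only: issues the same Kafka Connect REST calls that appear in the access log above.
public class ConnectStatusCheck {
    private static final String WORKER = "http://naument:8083"; // worker address from the log
    private static final String CONNECTOR = "vk_nau27_src";     // connector name from the log

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // Equivalent of: GET /connectors/vk_nau27_src/tasks/0/status  (task state, e.g. "RUNNING")
        HttpRequest statusReq = HttpRequest.newBuilder()
                .uri(URI.create(WORKER + "/connectors/" + CONNECTOR + "/tasks/0/status"))
                .header("Accept", "application/json")
                .GET()
                .build();
        HttpResponse<String> status = client.send(statusReq, HttpResponse.BodyHandlers.ofString());
        System.out.println(status.statusCode() + " " + status.body());

        // Equivalent of: GET /connectors/vk_nau27_src/topics  (topics the connector has produced to)
        HttpRequest topicsReq = HttpRequest.newBuilder()
                .uri(URI.create(WORKER + "/connectors/" + CONNECTOR + "/topics"))
                .header("Accept", "application/json")
                .GET()
                .build();
        HttpResponse<String> topics = client.send(topicsReq, HttpResponse.BodyHandlers.ofString());
        System.out.println(topics.statusCode() + " " + topics.body());
    }
}

The repeated Debezium entries that follow ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") are the vk_nau27 source task's ChangeEventQueue poll loop idling between batches; its cadence is typically governed by the connector's poll.interval.ms and max.batch.size settings.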
2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,192 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,193 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,193 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:29,193 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:29,193 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,193 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
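Note: the checking / sleeping / "Nothing to send to Kafka" cycle above is the normal idle loop of a Kafka Connect source task: the worker keeps calling the task's poll(), and Debezium's change event queue hands back nothing until a batch fills up or its poll interval elapses. The sketch below is illustrative only, assuming a hypothetical IdleLoopSourceTask with made-up MAX_BATCH_SIZE and POLL_INTERVAL_MS values; it is not Debezium's actual implementation, just a minimal source task whose poll() would produce this log pattern.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

// Illustrative sketch: a source task whose poll() mirrors the logged
// "checking for more records... / no records available, sleeping a bit..." cycle.
public class IdleLoopSourceTask extends SourceTask {

    // Hypothetical in-memory queue standing in for the connector's change event queue.
    private final BlockingQueue<SourceRecord> queue = new LinkedBlockingQueue<>();
    private static final int MAX_BATCH_SIZE = 2048;   // assumed batch size
    private static final long POLL_INTERVAL_MS = 500; // assumed poll interval

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        List<SourceRecord> batch = new ArrayList<>();
        long deadline = System.currentTimeMillis() + POLL_INTERVAL_MS;
        while (batch.size() < MAX_BATCH_SIZE && System.currentTimeMillis() < deadline) {
            // "checking for more records..."
            SourceRecord record = queue.poll(50, TimeUnit.MILLISECONDS);
            if (record == null) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                continue;
            }
            batch.add(record);
        }
        // Returning null (or an empty batch) is what leads the worker to log
        // "Nothing to send to Kafka. Polling source for additional records".
        return batch.isEmpty() ? null : batch;
    }

    @Override public String version() { return "0.0.1"; }
    @Override public void start(Map<String, String> props) { }
    @Override public void stop() { }
}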
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,195 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=112, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,195 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,195 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,195 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,195 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=80) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,195 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,195 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,195 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2488 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,195 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=113, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=80, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,195 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,196 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2488 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1888, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,207 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,207 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,207 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=625) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,207 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420169 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1891, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=625, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420168 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,255 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1880, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=624) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385996 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,256 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1883, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=624, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,256 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385996 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,289 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1889, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,289 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,289 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=627) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,289 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420087 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,289 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1892, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=627, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,289 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,289 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420087 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,295 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=635, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,295 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,295 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,295 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,295 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=629) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,295 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,295 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,295 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147419058 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,295 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=636, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=629, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,295 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,295 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147419058 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1890, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=628) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420058 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1893, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=628, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147420058 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,319 DEBUG || ReservedThread@561e1d17{RESERVED,thread=Thread[qtp974320615-126,5,main]} task=null ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:29,319 DEBUG || ReservedThread@561e1d17{IDLE,thread=Thread[qtp974320615-126,5,main]} exited ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:29,319 DEBUG || ran ReservedThread@561e1d17{IDLE,thread=null} in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1881, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
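For context on the recurring FETCH records above: the FetchRequestData fields they log (minBytes=1, maxWaitMs=500, maxBytes=52428800, isolationLevel=0, i.e. READ_UNCOMMITTED) line up with the stock consumer fetch settings, so the worker's internal naument--offsets/--configs/--statuses consumers appear to be running with defaults. Below is a minimal, illustrative Java sketch of a standalone consumer configured with those same values, assuming the broker1/2/3:29092 addresses seen in the log; the class name and the standalone setup are hypothetical and only demonstrate that the logged fetch parameters are ordinary consumer configs.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchSettingsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Assumed bootstrap addresses, taken from the broker ids/ports appearing in the log.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        // These four settings correspond to the fields logged in FetchRequestData:
        // minBytes=1, maxWaitMs=500, maxBytes=52428800, isolationLevel=0 (READ_UNCOMMITTED).
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // A real Connect worker subscribes these consumers to its internal
            // naument-offset-*, naument-config-* and naument-status-* partitions;
            // this sketch only mirrors the fetch-related settings.
        }
    }
}

The empty responses=[] in the FETCH responses are expected with incremental fetch sessions: the broker only re-sends partitions whose data changed, while the "implied" partitions listed by FetchSessionHandler remain registered in the session without being repeated in the response.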
2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=624) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385907 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,345 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1884, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=624, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,345 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,346 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,346 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,346 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,346 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,346 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,346 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385907 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1882, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 
rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because 
previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=624) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385901 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1885, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=624, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385901 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue]
... (the preceding "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." DEBUG pair from Oracle|vk_nau27|oracle-connector-task repeats verbatim many more times within the 13:56:29,693 poll cycle) ...
2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,693 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,694 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:29,694 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:29,694 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,694 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
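For orientation, the idle loop traced above (the change event queue repeatedly "checking for more records" while the worker task has 0 records to send) is just a poll-and-sleep pattern over a bounded queue. A minimal, self-contained Java sketch of that pattern follows; the batch size and sleep interval are placeholder values, not the connector's actual max.batch.size / poll.interval.ms settings.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class IdlePollLoopSketch {
        // Placeholder values; the real connector derives these from its configuration.
        private static final int MAX_BATCH_SIZE = 2048;
        private static final long SLEEP_MS = 500;

        public static void main(String[] args) throws InterruptedException {
            BlockingQueue<String> queue = new LinkedBlockingQueue<>(8192); // stand-in for the change event queue

            for (int attempt = 0; attempt < 10; attempt++) {
                // "checking for more records..."
                List<String> batch = new ArrayList<>();
                queue.drainTo(batch, MAX_BATCH_SIZE);

                if (batch.isEmpty()) {
                    // "no records available or batch size not reached yet, sleeping a bit..."
                    TimeUnit.MILLISECONDS.sleep(SLEEP_MS);
                    continue;
                }

                // "About to send N records to Kafka" -- where the worker would hand the batch off.
                System.out.println("About to send " + batch.size() + " records to Kafka");
            }
        }
    }

Nothing ever lands in the queue in this sketch, so it prints nothing and exits, which is exactly the behaviour the DEBUG lines above describe for an idle source task.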
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:29,695 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=113, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=81) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1987 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=114, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=81, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1987 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,699 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:29,700 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:29,700 TRACE Oracle|vk_nau27|streaming Current time 1692960989700 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:29,700 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:29,702 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 3000. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:29,702 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931598474 as end SCN. [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:29,702 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:29,703 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931598475' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:29,708 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:29,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1891, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
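The streaming thread's probes above ('SELECT CURRENT_SCN FROM V$DATABASE', the UPDATE of the LOG_MINING_FLUSH marker table) and the start_logmnr call logged a few entries further down are ordinary JDBC statements against Oracle. A rough, self-contained sketch of that sequence is shown below; the connection URL, credentials and the start SCN are placeholders rather than values from this installation, and the V$LOGMNR_CONTENTS query is only an approximation of what the connector actually runs, not its exact statement.

    import java.sql.CallableStatement;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class LogMinerWindowSketch {
        public static void main(String[] args) throws Exception {
            // Placeholders: adjust URL and credentials for your environment.
            String url = "jdbc:oracle:thin:@//oracle-host:1521/ORCLPDB1";
            try (Connection conn = DriverManager.getConnection(url, "c##dbzuser", "dbz")) {

                // "SELECT CURRENT_SCN FROM V$DATABASE" -- upper bound of the mining window.
                long endScn;
                try (Statement st = conn.createStatement();
                     ResultSet rs = st.executeQuery("SELECT CURRENT_SCN FROM V$DATABASE")) {
                    rs.next();
                    endScn = rs.getLong(1);
                }

                // "UPDATE LOG_MINING_FLUSH SET LAST_SCN = ..." -- the connector's flush marker table.
                try (PreparedStatement flush = conn.prepareStatement("UPDATE LOG_MINING_FLUSH SET LAST_SCN = ?")) {
                    flush.setLong(1, endScn + 1);
                    flush.executeUpdate();
                }

                // Placeholder: the real start SCN comes from the connector's stored offsets.
                long startScn = endScn - 1000;

                // Mirrors the start_logmnr block logged at 13:56:29,709.
                String startStmt = "BEGIN sys.dbms_logmnr.start_logmnr("
                        + "startScn => " + startScn + ", endScn => " + endScn + ", "
                        + "OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING"
                        + " + DBMS_LOGMNR.NO_ROWID_IN_STMT); END;";
                try (CallableStatement start = conn.prepareCall(startStmt)) {
                    start.execute();
                }

                // "Fetching results for SCN [start, end]" -- read the mined changes for the window.
                try (PreparedStatement q = conn.prepareStatement(
                        "SELECT SCN, OPERATION, SEG_OWNER, TABLE_NAME, SQL_REDO"
                        + " FROM V$LOGMNR_CONTENTS WHERE SCN > ? AND SCN <= ?")) {
                    q.setLong(1, startScn);
                    q.setLong(2, endScn);
                    try (ResultSet changes = q.executeQuery()) {
                        while (changes.next()) {
                            System.out.println(changes.getLong("SCN") + " " + changes.getString("OPERATION")
                                    + " " + changes.getString("TABLE_NAME"));
                        }
                    }
                }

                // Close the mining session once the window has been processed.
                try (CallableStatement end = conn.prepareCall("BEGIN sys.dbms_logmnr.end_logmnr; END;")) {
                    end.execute();
                }
            }
        }
    }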
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=626) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419668 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1894, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=626, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch 
for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419668 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,709 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931595527, endScn=289931598474, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:29,709 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931595528', endScn => '289931598474', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:29,718 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931595527, 289931598474] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1883, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, 
offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 
3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=625) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,756 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385496 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,756 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1886, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=625, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,757 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385495 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,790 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1892, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,790 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,790 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,790 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,790 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,790 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,790 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,790 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,790 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=628) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,790 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,790 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,790 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419586 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,790 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1895, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=628, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,790 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,790 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,790 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,791 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,791 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,791 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419586 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,795 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=636, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,796 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,796 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,796 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,796 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=630) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,796 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,796 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,796 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147418557 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,796 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=637, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=630, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,796 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,796 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147418557 
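The FetchRequestData fields that keep appearing in these DEBUG lines (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0) correspond to the Kafka consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level=read_uncommitted, all at their default values. A standalone consumer configured as below would issue equivalent fetches against the naument-config topic seen above; this is only an illustration with a placeholder group id, not a reproduction of how the Connect worker's internal config-topic reader is wired up.

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class FetchSettingsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-log-inspection");      // placeholder group id
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");                // maxWaitMs=500
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                    // minBytes=1
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");             // maxBytes=52428800
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");     // isolationLevel=0
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.ByteArrayDeserializer");
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.ByteArrayDeserializer");

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                // Reads the same internal topic the naument--configs client is fetching from above.
                consumer.subscribe(Collections.singletonList("naument-config"));
                consumer.poll(Duration.ofMillis(500)); // triggers the same kind of incremental FETCH requests
            }
        }
    }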
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1893, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=629) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419558 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1896, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=629, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419557 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,846 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1884, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,846 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,846 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,846 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=625) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,847 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385406 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,847 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1887, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=625, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,847 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385405 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1885, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
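The fetch traffic above is the worker's internal naument--offsets consumer (the Kafka Connect offset-storage reader) driving incremental fetch sessions: it keeps at most one in-flight FETCH per broker, which is why every other partition logs "Skipping fetch ... previous request ... has not been processed", and each request carries maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED). As a minimal sketch only, the stand-alone consumer below is configured with the equivalent client settings to tail the same naument-offset topic; the class name and group id are illustrative placeholders, and this is not the Connect runtime's own code.

```java
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class OffsetsTopicTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list as seen in the log (broker1/2/3:29092).
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "offsets-topic-inspector"); // hypothetical group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // Settings mirroring the FetchRequestData fields shown in the log:
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);              // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                  // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);           // maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("naument-offset"));
            while (true) {
                // Each poll() issues at most one in-flight fetch per broker, which is why the
                // log shows "Skipping fetch ... previous request ... has not been processed".
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> r : records) {
                    System.out.printf("%s-%d@%d (%d bytes)%n",
                            r.topic(), r.partition(), r.offset(),
                            r.value() == null ? 0 : r.value().length);
                }
            }
        }
    }
}
```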
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=625) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385400 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:29,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1888, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=625, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:29,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147385400 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,194 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,195 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:30,195 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:30,195 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,195 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
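The repeated ChangeEventQueue DEBUG pairs above are Debezium's idle polling loop: the source task asks the queue for a batch, and while nothing has been captured (or the batch threshold has not been reached) it sleeps briefly and checks again, while WorkerSourceTask keeps polling and finds "0 records" to send to Kafka. The snippet below is a deliberately simplified sketch of that producer/consumer hand-off using a plain blocking queue; it is not Debezium's actual ChangeEventQueue implementation, and the names (SimpleChangeQueue, maxBatchSize, pollIntervalMs) are illustrative analogues of the connector's max.batch.size / max.queue.size / poll.interval.ms settings.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Illustrative only: a much-simplified stand-in for io.debezium.connector.base.ChangeEventQueue.
public class SimpleChangeQueue<T> {
    private final BlockingQueue<T> queue = new ArrayBlockingQueue<>(8192); // max.queue.size analogue
    private final int maxBatchSize;    // max.batch.size analogue
    private final long pollIntervalMs; // poll.interval.ms analogue

    public SimpleChangeQueue(int maxBatchSize, long pollIntervalMs) {
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    // Called by the streaming side whenever a change event is captured.
    public void enqueue(T event) throws InterruptedException {
        queue.put(event); // blocks when the queue is full (back-pressure on the capture loop)
    }

    // Called from the source task's poll(): returns a batch, or an empty list after waiting a bit,
    // mirroring "checking for more records..." / "no records available ..., sleeping a bit...".
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        queue.drainTo(batch, maxBatchSize);
        if (batch.isEmpty()) {
            TimeUnit.MILLISECONDS.sleep(pollIntervalMs); // "sleeping a bit..."
            queue.drainTo(batch, maxBatchSize);
        }
        return batch; // may still be empty -> WorkerSourceTask logs "About to send 0 records to Kafka"
    }
}
```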
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=114, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=82) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,196 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1487 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=115, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=82, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,196 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,196 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1487 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1894, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=627) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419168 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1897, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=627, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419168 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,228 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,228 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,228 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 
SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1247/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,229 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1247/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,229 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1247/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,229 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:56:30.229186+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,229 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:56:30.22932+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1247/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,229 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:56:30.22939+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,229 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1247/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} 
[org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,229 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:56:30.229502+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,229 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,229 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,229 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1247/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,229 DEBUG || filled 129 HeapByteBuffer@1d18b05f[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,229 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1247/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,229 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,229 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 129 HeapByteBuffer@1d18b05f[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,229 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,229 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || URI --> SPACE2 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,229 DEBUG || REQUEST for //naument:8083/connectors on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} GET //naument:8083/connectors HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,230 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=1} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,230 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,230 DEBUG || handle //naument:8083/connectors HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,230 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,230 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,230 DEBUG || REQUEST GET /connectors on 
HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,230 DEBUG || scope null||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,230 DEBUG || context=||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,230 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,230 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,230 DEBUG || servlet ||/connectors -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,230 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,231 DEBUG || write(array HeapByteBuffer@4ffcea52[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,231 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=32 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,231 DEBUG || sendResponse info=null content=HeapByteBuffer@2bef0a08[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,231 DEBUG || COMMIT for /connectors on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,231 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@78dc6c9c] (null,[p=0,l=32,c=8192,r=32],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,231 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=32} last=true content=HeapByteBuffer@2bef0a08[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,231 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,231 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,231 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@78dc6c9c] ([p=0,l=141,c=8192,r=141],[p=0,l=32,c=8192,r=32],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,231 DEBUG || write: 
WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=141,c=8192,r=141]={<<>>son\r\n\r\n\n\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@2bef0a08[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,231 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,232 DEBUG || flushed 173 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,232 DEBUG || Flushed=true written=173 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,232 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,232 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@78dc6c9c] ([p=141,l=141,c=8192,r=0],[p=32,l=32,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,232 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,232 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,232 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,232 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,232 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,232 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,232 DEBUG || complete(org.eclipse.jetty.util.Callback$3@60a8dd8e) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,232 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,232 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,232 DEBUG || nextAction(false) 
TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,232 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,232 DEBUG || onCompleted for /connectors written=32 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,232 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors HTTP/1.1" 200 32 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,232 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,232 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,232 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,232 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,232 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,232 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,232 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,232 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,232 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,232 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,232 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,232 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,232 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,232 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,232 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,232 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,232 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,233 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,233 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,233 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,233 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,233 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,233 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,233 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,233 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,233 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,233 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 
SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1250/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,233 DEBUG || ReservedThread@73c3e6ea{RUNNING,thread=Thread[qtp974320615-122,5,main]} was=RUNNING next=IDLE size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,233 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1250/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,233 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1250/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,233 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,233 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1248/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1248/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,233 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1248/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,233 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=36]@2023-08-25T13:56:30.233875+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,233 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=36]@2023-08-25T13:56:30.233934+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1248/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,234 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=36]@2023-08-25T13:56:30.233985+03:00 
ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,233 DEBUG || ReservedThread@73c3e6ea{IDLE,thread=Thread[qtp974320615-122,5,main]} exited ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,234 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=37]@2023-08-25T13:56:30.234074+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,234 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1248/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,234 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,234 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,233 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:56:30.233566+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,234 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1248/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,234 DEBUG || ran ReservedThread@73c3e6ea{IDLE,thread=null} in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:56:30,234 DEBUG || filled 143 HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>n\r\n\r\n\n\nso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,234 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 143 HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>n\r\n\r\n\n\nso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,234 DEBUG || 
HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>n\r\n\r\n\n\nso...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,234 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>n\r\n\r\n\n\nso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1248/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,234 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || ReservedThreadExecutor@71687d8f{reserved=0/8,pending=1} startReservedThread p=1 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,234 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,234 DEBUG || queue ReservedThread@2da5db67{PENDING,thread=null} startThread=0 [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:56:30,234 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || run ReservedThread@2da5db67{PENDING,thread=null} in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=0/8,pending=1}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:56:30,234 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || ReservedThread@2da5db67{PENDING,thread=Thread[qtp974320615-176,5,main]} was=PENDING next=RESERVED size=0+1 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,234 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || 
EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=0/8,pending=1}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:56:30.234423+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1251/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,234 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:56:30.234473+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,234 DEBUG || ReservedThread@2da5db67{RESERVED,thread=Thread[qtp974320615-176,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,234 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:56:30.234536+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,234 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,234 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,234 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1251/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,234 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} GET //naument:8083/connectors/vk_nau27_sink HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,234 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1251/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 
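The trace above shows the worker's embedded Jetty listener on 10.0.2.144:8083 parsing successive GET requests from ReactorNetty clients for /connectors, /connectors/vk_nau27_sink and /connectors/vk_nau27_src, each with Accept: application/json. The following is a minimal client sketch that reproduces two of those calls with the JDK's built-in java.net.http client; the host, port and connector name are taken from the log, while the class itself is purely illustrative.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ConnectRestProbe {
    public static void main(String[] args) throws Exception {
        // Worker REST listener as it appears in the log (Host: naument:8083).
        String base = "http://naument:8083";
        HttpClient client = HttpClient.newHttpClient();
        for (String path : new String[] {"/connectors", "/connectors/vk_nau27_sink"}) {
            HttpRequest request = HttpRequest.newBuilder(URI.create(base + path))
                    .header("Accept", "application/json")
                    .GET()
                    .build();
            HttpResponse<String> response =
                    client.send(request, HttpResponse.BodyHandlers.ofString());
            // Expected answers per the log: 200 with a JSON body,
            // e.g. ["vk_nau27_src","vk_nau27_sink"] for /connectors.
            System.out.println(path + " -> " + response.statusCode() + " " + response.body());
        }
    }
}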
2023-08-25 13:56:30,234 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1251/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,234 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,234 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,234 DEBUG || filled 142 HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,234 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,234 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 142 HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,234 DEBUG || handle //naument:8083/connectors/vk_nau27_sink HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,234 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,234 DEBUG || handling 
HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,234 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,234 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || REQUEST GET /connectors/vk_nau27_sink on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,234 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,234 DEBUG || scope null||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,234 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,235 DEBUG || context=||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,235 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,235 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,235 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,235 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,235 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,235 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,235 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,235 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,235 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} GET //naument:8083/connectors/vk_nau27_src HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,235 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN 
is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,235 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,235 DEBUG || handle //naument:8083/connectors/vk_nau27_src HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,235 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,235 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,235 DEBUG || REQUEST GET /connectors/vk_nau27_src on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,235 DEBUG || scope null||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,235 DEBUG || context=||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,235 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,235 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,235 DEBUG || servlet ||/connectors/vk_nau27_src -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,235 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,236 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,236 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,235 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,236 DEBUG || [Worker 
clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,236 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,236 DEBUG || servlet ||/connectors/vk_nau27_sink -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,236 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,236 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,236 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,236 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,236 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960990236 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,236 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,236 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 741673ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,236 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,236 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,236 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,237 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,237 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,237 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,237 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960990237 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 
13:56:30,237 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,237 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 741672ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,237 DEBUG || write(array HeapByteBuffer@4287c137[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,237 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1528 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,237 DEBUG || sendResponse info=null content=HeapByteBuffer@41fa54e3[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,237 DEBUG || COMMIT for /connectors/vk_nau27_src on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,237 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@e75c5ac] (null,[p=0,l=1528,c=8192,r=1528],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,237 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1528} last=true content=HeapByteBuffer@41fa54e3[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,237 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,237 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,237 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@e75c5ac] ([p=0,l=143,c=8192,r=143],[p=0,l=1528,c=8192,r=1528],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,237 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\n\x00\x00...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@41fa54e3[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,237 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,237 DEBUG || flushed 1671 
SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,237 DEBUG || Flushed=true written=1671 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,237 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,237 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@e75c5ac] ([p=143,l=143,c=8192,r=0],[p=1528,l=1528,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,237 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,238 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,238 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,238 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,238 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,238 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,238 DEBUG || complete(org.eclipse.jetty.util.Callback$3@246b65a9) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,238 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,238 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,238 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,238 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.HttpChannel] 
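The sendResponse/COMMIT entries above show the 200 reply for /connectors/vk_nau27_src carrying a 1528-byte JSON connector-info document that begins with {"name":"vk_nau27_src", ... and ends with "type":"source"}. Below is a short sketch for reading that document; it assumes Jackson is available on the classpath (the Connect runtime itself bundles it) and inspects only the name, type and tasks fields visible in the truncated body above.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ConnectorInfoCheck {
    public static void main(String[] args) throws Exception {
        // Endpoint as seen in the request log; adjust for another worker.
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://naument:8083/connectors/vk_nau27_src"))
                .header("Accept", "application/json")
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // The body is the connector-info document logged above (Content-Length: 1528).
        JsonNode info = new ObjectMapper().readTree(response.body());
        System.out.printf("%s is a %s connector with %d task(s)%n",
                info.get("name").asText(),
                info.get("type").asText(),
                info.get("tasks").size());
    }
}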
2023-08-25 13:56:30,238 DEBUG || onCompleted for /connectors/vk_nau27_src written=1528 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,238 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_src HTTP/1.1" 200 1528 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,238 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,238 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,238 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,238 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,238 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,238 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,238 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,238 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,238 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,238 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,238 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} parsed false HttpParser{s=END,0 of -1} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,238 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,238 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,238 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,238 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,238 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,238 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,238 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,238 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,238 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,238 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,238 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,238 DEBUG || releaseRequestBuffer 
HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,238 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,238 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,239 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,239 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,239 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || update 
org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,239 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=6/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,239 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,239 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:56:30.239418+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,239 DEBUG || write(array HeapByteBuffer@6c9a9487[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,239 DEBUG || ReservedThread@2da5db67{RESERVED,thread=Thread[qtp974320615-176,5,main]} 
task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:56:30.239505+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,239 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=905 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,239 DEBUG || sendResponse info=null content=HeapByteBuffer@7a4e8041[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,239 DEBUG || COMMIT for /connectors/vk_nau27_sink on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=5} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,239 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1e3315ca] (null,[p=0,l=905,c=8192,r=905],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,239 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=905} last=true content=HeapByteBuffer@7a4e8041[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,239 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,239 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,239 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1e3315ca] ([p=0,l=142,c=8192,r=142],[p=0,l=905,c=8192,r=905],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,239 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>\nson\r\n\r\n\x00...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@7a4e8041[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,239 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,239 DEBUG || flushed 1047 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=5} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,239 DEBUG || Flushed=true written=1047 remaining=0 
WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,239 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,239 TRACE Oracle|vk_nau27|streaming Transaction 040002008bcc2a00 is not yet committed and START event detected. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,239 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,239 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,239 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1e3315ca] ([p=142,l=142,c=8192,r=0],[p=905,l=905,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,239 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595537, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='040002008bcc2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026f4c.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 040002008bcc2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595541, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='01001900b3ba2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026f50.01c4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 01001900b3ba2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,239 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:56:30.239579+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,240 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=6} 
[org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,240 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,239 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:56:30.239473+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,240 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,240 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,240 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,240 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595546, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='10001c002e332a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026f56.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 10001c002e332a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=7/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,240 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) 
LogMinerEventRow{scn=289931595550, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='130020004eb42b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026f59.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || complete(org.eclipse.jetty.util.Callback$3@348ea3f0) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,240 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,240 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,240 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 130020004eb42b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,240 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,240 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,240 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=7/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} 
[org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,240 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,240 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,240 DEBUG || onCompleted for /connectors/vk_nau27_sink written=905 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,240 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,240 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,240 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595554, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0d000800d3d62900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026f5e.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 0d000800d3d62900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595558, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0700030074ba2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026f61.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 0700030074ba2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_sink HTTP/1.1" 200 905 "-" "ReactorNetty/1.1.6" 6 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595563, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0300160039b32800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026f67.0090, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 0300160039b32800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595574, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0b000200430a2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026f96.0084, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 0b000200430a2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595578, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='090002004fea2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026f9a.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 090002004fea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,240 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595583, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0e001c00e43f2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026fa0.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 0e001c00e43f2900 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595587, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0f001e004aea2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026fa4.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 0f001e004aea2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595592, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0c00070005522b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026faa.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 TRACE Oracle|vk_nau27|streaming Transaction 0c00070005522b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 DEBUG || ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=3,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:56:30.240984+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,240 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,241 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595596, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='08001100435c2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026fad.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Transaction 08001100435c2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,240 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 
13:56:30,241 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595601, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0400090012d42a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026fb3.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Transaction 0400090012d42a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595612, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='02000800fdce2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026fe5.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Transaction 02000800fdce2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 DEBUG || ReservedThreadExecutor@71687d8f{reserved=0/8,pending=1} startReservedThread p=1 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595616, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='12001f00c8462a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026fe9.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Transaction 12001f00c8462a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=7} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595621, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='11001200ec352b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026fef.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Transaction 11001200ec352b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 DEBUG || 
queue ReservedThread@23af434f{PENDING,thread=null} startThread=0 [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:56:30,241 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=7} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,241 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,241 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,241 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/status on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} GET //naument:8083/connectors/vk_nau27_src/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,241 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,241 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,241 DEBUG || run ReservedThread@23af434f{PENDING,thread=null} in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=0/8,pending=1}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:56:30,241 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,241 DEBUG || ReservedThread@23af434f{PENDING,thread=Thread[qtp974320615-178,5,main]} was=PENDING next=RESERVED size=0+1 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,241 DEBUG || ReservedThread@23af434f{RESERVED,thread=Thread[qtp974320615-178,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,241 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false 
i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,241 DEBUG || handle //naument:8083/connectors/vk_nau27_src/status HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,241 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,241 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,241 DEBUG || REQUEST GET /connectors/vk_nau27_src/status on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,241 DEBUG || scope null||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,241 DEBUG || context=||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,241 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,241 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,241 DEBUG || servlet ||/connectors/vk_nau27_src/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,241 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595625, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0d001d008bd82900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026ff2.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Transaction 0d001d008bd82900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=0/8,pending=1}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:56:30.241303+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 
13:56:30,241 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595630, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='1300190052af2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026ff8.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 TRACE Oracle|vk_nau27|streaming Transaction 1300190052af2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,241 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=0/8,pending=1}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:56:30.241223+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,241 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,241 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,241 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=7} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,242 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 
13:56:30,241 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595634, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0a001000f71a3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00026ffc.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 0a001000f71a3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,241 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:56:30.241818+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,242 DEBUG || filled 150 HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,242 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=8} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595639, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='03000e0074b62800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027002.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 03000e0074b62800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,242 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595650, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='05001a0014742b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027035.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 05001a0014742b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595654, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='1400080044b12b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027039.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 1400080044b12b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595659, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='060021003b062a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002703f.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 060021003b062a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595663, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='07000a009dc82d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027042.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,242 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,242 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,242 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false 
se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,242 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,242 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,242 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 07000a009dc82d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,242 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,242 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,242 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595668, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='09000a00709f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027048.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 09000a00709f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595671, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0e000200d03f2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002704b.00f8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 0e000200d03f2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595676, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0f00100089ed2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002704f.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 0f00100089ed2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || filled 0 
HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595687, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='01000500e3b22500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002707c.00b0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 01000500e3b22500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595691, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='04001b0007c52a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027080.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 04001b0007c52a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595696, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='08001000645f2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027086.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 08001000645f2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,242 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,242 DEBUG || 
HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,242 DEBUG || write(array HeapByteBuffer@50a375d8[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,242 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,242 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=170 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,242 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,242 DEBUG || sendResponse info=null content=HeapByteBuffer@2bf6dc99[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595700, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='13001c00c4b62b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027089.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 13001c00c4b62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || COMMIT for /connectors/vk_nau27_src/status on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595705, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='09000600d3ef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002708f.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 09000600d3ef2f00 has 0 
events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595716, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0e000900933f2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000270e3.01e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 0e000900933f2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595720, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='12000f00be4c2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000270e8.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 12000f00be4c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595725, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='04000a0030862a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000270ee.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 04000a0030862a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595729, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0d001a00c3d62900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000270f1.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 0d001a00c3d62900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595734, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='13001b00c2a42b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000270f7.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 13001b00c2a42b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595745, tableId='null', 
tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='03000800ceaf2800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027124.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 03000800ceaf2800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595749, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='08001900e8552e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027127.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Transaction 08001900e8552e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595754, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='09000800e1ea2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002712d.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 09000800e1ea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595758, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='100018006b332a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027130.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 100018006b332a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595763, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0a001600991d3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027136.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0a001600991d3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595767, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='05000f0032762b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.00027139.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 05000f0032762b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595772, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0b001e00820d2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002713f.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0b001e00820d2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595783, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0c000a0082512b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002716f.00a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0c000a0082512b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595787, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0e00000016442900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027173.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0e00000016442900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595792, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='040013004acd2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027179.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 040013004acd2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595796, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='14001800cfaa2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002717c.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 14001800cfaa2b00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595801, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='06001c001d0e2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027182.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 06001c001d0e2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595812, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='01000a0056be2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000271d2.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 01000a0056be2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595816, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='13000c001cb62b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000271d6.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 13000c001cb62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595821, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='12000100b0492a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000271dc.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 12000100b0492a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595825, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='10001600a9382a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000271df.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 10001600a9382a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595830, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0d001100bad92900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000271e5.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0d001100bad92900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595834, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0f000d0038f02900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000271e9.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0f000d0038f02900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595839, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0a0011004c193100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000271ef.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0a0011004c193100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595850, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='090013004bf12f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002724e.00dc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 090013004bf12f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595854, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0b000c00eb0b2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027252.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0b000c00eb0b2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595859, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='030013004aae2800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027258.0088, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 030013004aae2800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595863, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0e001e00013f2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002725b.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0e001e00013f2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595868, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='060014006c0a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027261.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 060014006c0a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SYSTEM_CHECK' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595871, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:13Z, transactionId='0100080054b52500', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027263.00ec, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0100080054b52500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595874, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='08001e00bc612e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027264.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 08001e00bc612e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595877, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='05001200ff6e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.00027269.018c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 05001200ff6e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595880, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='0200090064d12700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027270.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0200090064d12700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595883, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='1200140035432a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027272.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 1200140035432a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595886, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='0d00210093d52900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027279.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0d00210093d52900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595889, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='03000f00cfbb2800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002727d.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 03000f00cfbb2800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595892, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='10000400d5382a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027281.016c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 10000400d5382a00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595895, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='04000b006bd02a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027283.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 04000b006bd02a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931595897) LogMinerEventRow{scn=289931595898, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='0b0007001c112f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002728a.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0b0007001c112f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931595897) LogMinerEventRow{scn=289931595899, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='0b000b00f70f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002728f.00dc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0b000b00f70f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595900, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:14Z, transactionId='0b001b00b60c2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027294.0038, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0b001b00b60c2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595903, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0800060026592e00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000272aa.00c8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0800060026592e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595906, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='12001c00ff4e2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000272ab.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 12001c00ff4e2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595909, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='06000e00a6072a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000272b1.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 06000e00a6072a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595912, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='01001500b0b92500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000272b8.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 01001500b0b92500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931595914) LogMinerEventRow{scn=289931595915, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0d0004004bd12900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027316.00b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Transaction 0d0004004bd12900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,243 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931595914) LogMinerEventRow{scn=289931595918, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0b001c00f10e2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027355.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0b001c00f10e2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595921, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='10000e005c362a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027396.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 10000e005c362a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595924, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='03001200ccb02800', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273ad.009c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 03001200ccb02800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595927, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='060018003a0b2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273b1.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 060018003a0b2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595930, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0100160012be2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273b8.017c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0100160012be2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595933, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='140000001cb22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273c0.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 140000001cb22b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595936, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='11001500582b2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273c5.0164, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 11001500582b2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595939, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0800080083552e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273ca.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0800080083552e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931595941) LogMinerEventRow{scn=289931595942, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='13001800b7b62b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273d0.00ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 13001800b7b62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931595941) LogMinerEventRow{scn=289931595945, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0e001500fd482900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273df.01a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0e001500fd482900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931595944) LogMinerEventRow{scn=289931595948, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0f000a008bf42900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273e5.01d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0f000a008bf42900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931595944) LogMinerEventRow{scn=289931595951, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='12000700954c2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273f6.00b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 12000700954c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595953, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='09000300b4ed2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273f6.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 09000300b4ed2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595956, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0d00170096d92900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273fa.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0d00170096d92900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595959, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='040012009acc2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000273fd.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 040012009acc2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595962, tableId='null', 
tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='07000700f6c82d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027418.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 07000700f6c82d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595965, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='03000d001ab32800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002741c.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 03000d001ab32800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595968, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0800200025552e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002741f.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0800200025552e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595971, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0b001000cd102f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027441.0014, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0b001000cd102f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595974, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='13000600aaae2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027445.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 13000600aaae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595977, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='1000130024372a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.00027449.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 1000130024372a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595980, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0e001600a2422900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027457.002c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0e001600a2422900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595983, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0c00160021572b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002745b.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0c00160021572b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595986, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='06000900abfe2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002745e.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 06000900abfe2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595989, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='12000500b54f2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027467.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 12000500b54f2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595992, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0200140057cc2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002746c.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0200140057cc2700 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595995, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0100000045bb2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027471.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0100000045bb2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931595998, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='09001f00b6ed2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027474.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 09001f00b6ed2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596001, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='04000f00d4d02a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002747c.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 04000f00d4d02a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596004, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0a000400501f3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002747f.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0a000400501f3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596007, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0500170051732b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027498.01e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0500170051732b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SEQUENCE' which does not match 
filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596010, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='14000a0058ab2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002749b.00d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 14000a0058ab2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596013, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='1300110007b32b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000274a2.0118, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 1300110007b32b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SEQUENCE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596016, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0f001600eff52900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000274a4.00d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0f001600eff52900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596018) LogMinerEventRow{scn=289931596019, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='110009007c342b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000274b5.0058, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 110009007c342b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596023, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0a00170003193100', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000274bc.0068, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Transaction 0a00170003193100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,244 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596027, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0d00160004d82900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000274ca.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0d00160004d82900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596029) LogMinerEventRow{scn=289931596030, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0b001a00150c2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000274da.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0b001a00150c2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596033, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='030011000fb02800', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000274e8.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 030011000fb02800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596036, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='0700020081cc2d00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000274f9.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0700020081cc2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596039, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:15Z, transactionId='050007000f742b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027507.01c4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 050007000f742b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596048, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0e001300fd422900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002750c.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0e001300fd422900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596051, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='08001c0032562e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027513.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 08001c0032562e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596054, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='1000170025312a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027517.01c4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 1000170025312a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596057, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='14000e008dae2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002751c.0114, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 14000e008dae2b00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596060, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='060012009c0a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027520.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 060012009c0a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596063, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0f00180017ec2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027525.0110, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0f00180017ec2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596066, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='09002000f5f82f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002755e.002c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 09002000f5f82f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596069, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0a001800901b3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027562.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0a001800901b3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596072, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='1200080024472a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027566.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 1200080024472a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596075, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='04000c00c4ce2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027579.0094, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 04000c00c4ce2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596078, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0d00140013da2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002757c.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0d00140013da2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596081, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='03000600d5b42800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000275b6.01a8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 03000600d5b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596084, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='050009005c772b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000275ba.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 050009005c772b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596087, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='01001b0047b82500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000275bd.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 01001b0047b82500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596089) LogMinerEventRow{scn=289931596090, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0c001700e3582b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027620.01c4, 
ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0c001700e3582b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596089) LogMinerEventRow{scn=289931596092, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0c000500465c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027635.010c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0c000500465c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596089) LogMinerEventRow{scn=289931596094, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0c000c007d5a2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027637.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0c000c007d5a2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596096, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0c000000ac532b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027640.0114, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0c000000ac532b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596099, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0800030099032e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027644.0110, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0800030099032e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596102, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0b000900270f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027679.00ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0b000900270f2f00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596105, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='140006003aab2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002767d.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 140006003aab2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596108, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='1300170044bb2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027682.0108, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 1300170044bb2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596111, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='10000b007f352a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027685.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 10000b007f352a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596114, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0e000500db472900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027688.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0e000500db472900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596116) LogMinerEventRow{scn=289931596117, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='02000c00a3cd2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000276b8.00a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 02000c00a3cd2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596116) LogMinerEventRow{scn=289931596119, tableId='null', 
tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='02001b00f2d12700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000276cc.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 02001b00f2d12700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596116) LogMinerEventRow{scn=289931596121, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='02000300edce2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000276cf.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 02000300edce2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596123, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0200170047d22700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000276fb.0184, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0200170047d22700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596126, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='09001100bef32f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000276ff.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 09001100bef32f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596129, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='12001900144d2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027728.01e0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 12001900144d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596132, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0d000e00b7d12900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.0002772d.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0d000e00b7d12900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596135, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='04000000b4cc2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027732.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 04000000b4cc2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596138, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0f00000094f32900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027736.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 0f00000094f32900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596141, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='030000005bb62800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002773b.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 030000005bb62800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596144, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='080014006e582e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002773f.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 080014006e582e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596147, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='13001f00ecb22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027744.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,245 TRACE Oracle|vk_nau27|streaming Transaction 13001f00ecb22b00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596150, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='06000300b8082a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027748.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 06000300b8082a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596153, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0e000e008f462900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002774d.0188, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0e000e008f462900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596156, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0a000e00b31c3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027752.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0a000e00b31c3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596159, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='05000a006c6f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027757.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 05000a006c6f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596162, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='120020007f492a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002775b.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 120020007f492a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596165, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0d00060029d92900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027760.012c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0d00060029d92900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596168, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='1400120033ad2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027764.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 1400120033ad2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596171, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='03002000feb82800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027769.0114, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 03002000feb82800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596174, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='080013006f582e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002776c.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 080013006f582e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596177, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='110011004d2e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027799.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 110011004d2e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596180, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='070013006ec82d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002779d.014c, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 070013006ec82d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596183, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0f001b0017ef2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277a1.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0f001b0017ef2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596185) LogMinerEventRow{scn=289931596186, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='09001800e2ef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277a9.00f4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 09001800e2ef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596187, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0900210031f02f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277c2.00b0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0900210031f02f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596190, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0a000700201e3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277c6.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0a000700201e3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596193, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='05000000c4742b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277ca.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 05000000c4742b00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596196, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='1400170075b52b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277cf.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 1400170075b52b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596199, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0c001400e7502b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277d3.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0c001400e7502b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596202, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0f00130065ef2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277d8.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0f00130065ef2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596205, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0400030052d62a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277dc.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0400030052d62a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596208, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='13000b00fcb22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277e1.018c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 13000b00fcb22b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596211, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='12000900834a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277e6.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 12000900834a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596214, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0a000000ce1a3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277eb.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0a000000ce1a3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596217, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='02001100f9ca2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277ef.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 02001100f9ca2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596220, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='11000c00692a2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277f4.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 11000c00692a2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596223, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='01001c0057b82500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277f8.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 01001c0057b82500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596226, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='070012001bca2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000277fc.0128, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 070012001bca2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596229, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0c000800cc5c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027800.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0c000800cc5c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596232, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0e001f001e492900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027805.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0e001f001e492900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596235, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0b001800170d2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002780a.0108, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0b001800170d2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596238, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0d001b000cd52900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002780e.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0d001b000cd52900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596241, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='03001000b9b82800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027813.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 03001000b9b82800 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596244, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='08000200b75b2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027817.0164, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 08000200b75b2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596247, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='05000800b3732b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002781c.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 05000800b3732b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596250, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='10001900f93b2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027820.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 10001900f93b2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596253, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0200050045cd2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027825.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0200050045cd2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596256, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='09001400d2eb2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027829.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 09001400d2eb2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596259, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='11002100b2322b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002782e.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 11002100b2322b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596262, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='06000d003aff2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027832.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 06000d003aff2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596265, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='04000d0060d12a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027837.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 04000d0060d12a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596268, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='01000100dfb82500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002783b.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 01000100dfb82500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596271, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0700210038c32d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027840.018c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0700210038c32d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596274, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='12000c00e24c2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027845.0158, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 12000c00e24c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596277, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='13001600bcb62b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002784a.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 13001600bcb62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596280, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0c0018008c5a2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002784e.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0c0018008c5a2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596283, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='02000e0058c92700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027853.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 02000e0058c92700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596286, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='140019009bb32b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027857.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 140019009bb32b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596289, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='110004005c2b2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002785c.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 110004005c2b2b00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596291) LogMinerEventRow{scn=289931596292, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0f001d0070f02900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027861.018c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,246 TRACE Oracle|vk_nau27|streaming Transaction 0f001d0070f02900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596291) LogMinerEventRow{scn=289931596295, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='03000a0059b22800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027872.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 03000a0059b22800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596291) LogMinerEventRow{scn=289931596298, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='08000e00325d2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027877.0154, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 08000e00325d2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596291) LogMinerEventRow{scn=289931596301, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='12000d00e64a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002787b.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 12000d00e64a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596291) LogMinerEventRow{scn=289931596304, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0700180099cc2d00', operation='COMMIT', 
userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027896.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0700180099cc2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596291) LogMinerEventRow{scn=289931596307, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0a000500981c3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002789f.00c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0a000500981c3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596291) LogMinerEventRow{scn=289931596310, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0b001900a70d2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000278c3.0030, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0b001900a70d2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596291) LogMinerEventRow{scn=289931596312, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0b000500450f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000278e0.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0b000500450f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596315, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0d00150002d72900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000278e1.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0d00150002d72900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 
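Note: the TRACE records above from AbstractLogMinerEventProcessor fall into two repeating kinds: COMMIT markers for transactions for which the processor holds 0 buffered events (mostly under the DEBEZIUM and NAUSD40 users), and changes discarded because their table (NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS, QRTZ_TRIGGERS, QRTZ_SIMPLE_TRIGGERS, TBL_SYS_TASK_DATE) does not match the connector's table filters. A minimal Python sketch for tallying both kinds from a captured log follows; the regexes simply mirror the wording of the lines above, and the connect.log path is only a placeholder.

```python
import re
import sys
from collections import Counter

# Patterns copied from the TRACE lines emitted by
# io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor
# in this excerpt; the exact wording beyond this log is an assumption.
EMPTY_TX = re.compile(r"Transaction (\S+) has 0 events")
SKIPPED = re.compile(r"Skipping change associated with table '([^']+)' which does not match filters")

def summarize(path: str) -> None:
    """Count empty-transaction commits and per-table filter skips in a Connect log."""
    empty_tx = 0
    skipped_tables = Counter()
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            if EMPTY_TX.search(line):
                empty_tx += 1
            m = SKIPPED.search(line)
            if m:
                skipped_tables[m.group(1)] += 1
    print(f"commits for transactions with 0 captured events: {empty_tx}")
    for table, count in skipped_tables.most_common():
        print(f"filtered out {count:>5} change(s) for {table}")

if __name__ == "__main__":
    # Placeholder path; point it at the captured worker log.
    summarize(sys.argv[1] if len(sys.argv) > 1 else "connect.log")
```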
2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596318, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='10000500ac2d2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000278e5.0118, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 10000500ac2d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596321, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0c00150090532b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000278e8.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0c00150090532b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596324, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='06001f002a022a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000278f7.0178, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 06001f002a022a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596327, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='02001d0034d22700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000278fc.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 02001d0034d22700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596330, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0e002100cb472900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027901.0118, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0e002100cb472900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596332) LogMinerEventRow{scn=289931596333, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='09001b007eef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027908.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 09001b007eef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596332) LogMinerEventRow{scn=289931596336, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='11001400c82c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027913.0060, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 11001400c82c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596332) LogMinerEventRow{scn=289931596339, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='03001a008db42800', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027916.01ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 03001a008db42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596332) LogMinerEventRow{scn=289931596342, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0100210023ba2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002792f.002c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0100210023ba2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596345, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='08001a00335d2e00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027930.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 08001a00335d2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596348, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='12001d001a4d2a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027945.0054, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 12001d001a4d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596350) LogMinerEventRow{scn=289931596351, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0d001f00a3d32900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002795a.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0d001f00a3d32900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596350) LogMinerEventRow{scn=289931596354, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='070017006ac72d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027969.01d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 070017006ac72d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596350) LogMinerEventRow{scn=289931596357, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0b000e00fd0a2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002796c.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0b000e00fd0a2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596359, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0300140055b42800', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002796b.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0300140055b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596362, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='080015006c5a2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002796f.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 080015006c5a2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596366, 
tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0c00110052582b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002798e.00ec, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0c00110052582b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596368) LogMinerEventRow{scn=289931596369, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='02000a0064cb2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002799b.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 02000a0064cb2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596371) LogMinerEventRow{scn=289931596372, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0400110034ce2a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000279c8.0130, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0400110034ce2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596371) LogMinerEventRow{scn=289931596375, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='11001d00f8312b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000279f0.00b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 11001d00f8312b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596377, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='11002000502d2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a00.0044, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 
TRACE Oracle|vk_nau27|streaming Transaction 11002000502d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596380, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='13001000c9b32b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a04.016c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 13001000c9b32b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596383, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='0500040063752b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a08.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0500040063752b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596386, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='100008004b362a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a23.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 100008004b362a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596389, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:18Z, transactionId='07001e0076b82d00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a32.0178, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 07001e0076b82d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596392, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:19Z, transactionId='0e000c0039422900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a39.0050, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0e000c0039422900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596395, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:19Z, transactionId='0c00020006542b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a3c.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 0c00020006542b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596398, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:19Z, transactionId='120013006d512a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a55.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 120013006d512a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596401, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:19Z, transactionId='14000f0021ae2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a5a.0114, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Transaction 14000f0021ae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,247 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596404, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:19Z, transactionId='02001300dad02700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a5d.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Transaction 02001300dad02700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596407, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:19Z, transactionId='06001d0094072a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a82.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Transaction 06001d0094072a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596410, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='09000f00a1ee2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027a98.0038, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Transaction 09000f00a1ee2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,242 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4f3ff88f] (null,[p=0,l=170,c=8192,r=170],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,248 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=170} last=true content=HeapByteBuffer@2bf6dc99[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,248 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,248 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,248 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4f3ff88f] ([p=0,l=142,c=8192,r=142],[p=0,l=170,c=8192,r=170],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,242 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,248 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@2bf6dc99[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,248 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,248 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,248 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,248 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,248 DEBUG || flushed 312 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=7/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=7} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,248 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,248 DEBUG || Flushed=true written=312 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,248 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596412) LogMinerEventRow{scn=289931596413, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0d000d006ddf2900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027aa8.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Transaction 0d000d006ddf2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596416, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0f000800afef2900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.00027aaa.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Transaction 0f000800afef2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,248 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,248 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,248 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4f3ff88f] ([p=142,l=142,c=8192,r=0],[p=170,l=170,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,248 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,248 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,248 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,248 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,248 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,248 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=7} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,248 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,248 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,248 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,248 DEBUG || complete(org.eclipse.jetty.util.Callback$3@382b14dd) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,248 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false 
i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,248 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,248 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,248 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,248 DEBUG || onCompleted for /connectors/vk_nau27_src/status written=170 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,248 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 7 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,249 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,249 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,249 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=8} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,249 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=8} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,249 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,249 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,249 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=8} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,249 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,249 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=8} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,249 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,249 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=8} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,249 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=8} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,249 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,249 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,249 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,249 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,249 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,249 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,249 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,249 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,249 DEBUG || parseNext s=START 
HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,249 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,249 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,249 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,249 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,249 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,249 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,249 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,249 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,249 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING 
os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,249 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,249 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,248 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,249 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,249 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,249 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,248 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,249 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,248 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,249 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,248 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,249 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596419, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0300210086b42800', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ab2.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 0300210086b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596422, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='11000e0043282b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027abb.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 11000e0043282b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596425, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0100170082bc2500', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ac4.0178, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 0100170082bc2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596428, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='08001b00f95e2e00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ace.001c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 08001b00f95e2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596431, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0a000c003d033100', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ad6.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 0a000c003d033100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596434, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0b001100240b2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ae7.01b0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 0b001100240b2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596437, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='12001e00c54b2a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027af7.0030, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 12001e00c54b2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596440, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='05001300356f2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b05.0164, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 05001300356f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596443, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='1400030094ad2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b14.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 1400030094ad2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596446, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='07001d0008c92d00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b23.013c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 07001d0008c92d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596449, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='10001400ea2a2a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b31.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 10001400ea2a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596452, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='06001b003d042a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b47.0104, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Transaction 06001b003d042a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,249 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,250 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,250 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,250 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,250 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,250 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], 
selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,250 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,250 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,250 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=37]@2023-08-25T13:56:30.250954+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,251 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=37]@2023-08-25T13:56:30.251016+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,251 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=37]@2023-08-25T13:56:30.251057+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,251 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false 
i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,251 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,251 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,251 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,251 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,251 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || HEADER:Accept: application/json --> FIELD 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,251 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} GET //naument:8083/connectors/vk_nau27_src/config HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,251 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,251 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,251 DEBUG || handle //naument:8083/connectors/vk_nau27_src/config HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,251 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,251 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,251 DEBUG || REQUEST GET /connectors/vk_nau27_src/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,251 DEBUG || scope null||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,251 DEBUG || context=||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,251 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,249 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/status on 
HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,249 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,251 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,251 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,251 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=9/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,251 DEBUG || servlet ||/connectors/vk_nau27_src/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,251 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,251 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=38]@2023-08-25T13:56:30.25112+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,251 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,251 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,251 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,251 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,250 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,251 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=9/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/status HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,251 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,252 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,252 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,252 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,252 DEBUG || REQUEST GET /connectors/vk_nau27_sink/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,252 DEBUG || scope null||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 DEBUG || context=||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596454) LogMinerEventRow{scn=289931596455, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='01001a0097b62500', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b58.005c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 01001a0097b62500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596458, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0f0005002bf02900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b59.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 0f0005002bf02900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,252 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596461, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='040004007ed22a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b5d.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 040004007ed22a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,252 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596464, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='1300090070b22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b60.00e4, ssn=0, redoSql='commit;'} 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 1300090070b22b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596467, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0e00140061422900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b67.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 0e00140061422900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 DEBUG || servlet ||/connectors/vk_nau27_sink/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596470, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0900100050ef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b6c.0114, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 0900100050ef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596473, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='04001500aace2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b70.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 04001500aace2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596476, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0b001d009b0d2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b74.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 0b001d009b0d2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming 
Commit (smallest SCN null) LogMinerEventRow{scn=289931596479, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0c000b00da572b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b7d.01c4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 0c000b00da572b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596482, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0d00000083d62900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b81.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 0d00000083d62900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596485, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='03001f00c3ad2800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b8e.01bc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 03001f00c3ad2800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,252 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596488, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='1000150063302a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027b9e.0190, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 1000150063302a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596491, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='060020000f0a2a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bae.01a8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 060020000f0a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596494, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0a001200a3233100', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bbe.01a8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 0a001200a3233100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596497, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='01000600a3bd2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bc3.0118, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 01000600a3bd2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596500, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='13000f00bcbe2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bc7.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 13000f00bcbe2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960990252 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,252 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,252 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 741657ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596503, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0b000800560b2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bcc.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Transaction 0b000800560b2f00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,252 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596506, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0e001b00e7462900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bd0.0154, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 0e001b00e7462900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596509, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='10000c00dd2d2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bd5.0118, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 10000c00dd2d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596512, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='02002100ccca2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bd8.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 02002100ccca2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596515, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='09001d00baea2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bdd.0180, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 09001d00baea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596518, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='04001c00d7cf2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027be1.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 04001c00d7cf2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596521, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='05000d00f0752b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027beb.0030, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 05000d00f0752b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596524, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='1400050096ad2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bee.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 1400050096ad2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596527, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0f0002008ff22900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bf9.013c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 0f0002008ff22900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596530, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='06000000ef062a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027bfc.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 06000000ef062a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596533, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='08000b00335f2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027c03.01ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 08000b00335f2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596536, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='11001e00dd2d2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027c07.011c, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 11001e00dd2d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596538) LogMinerEventRow{scn=289931596539, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='070009002bc42d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027c24.012c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 070009002bc42d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596538) LogMinerEventRow{scn=289931596541, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0700200002c52d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027c32.00cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 0700200002c52d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596543, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='070019009dd02d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027c63.00a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 070019009dd02d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596546, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='01001100d0b42500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027c67.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 01001100d0b42500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 DEBUG || write(array HeapByteBuffer@3b112430[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596549, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='1200030038472a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', 
rollbackFlag=false, rsId=0x0098e8.00027c6b.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 1200030038472a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1431 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596552, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='1300000041b42b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027c82.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 1300000041b42b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 DEBUG || sendResponse info=null content=HeapByteBuffer@17d4dd6d[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596555, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0c001b0044572b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027c87.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 0c001b0044572b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596558, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='0a001c00dc1f3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027c8a.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 0a001c00dc1f3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 DEBUG || COMMIT for /connectors/vk_nau27_src/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596561, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:20Z, transactionId='05000100f06e2b00', 
operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027cb7.0040, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 05000100f06e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596564, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02000d0089ce2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027cbb.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 02000d0089ce2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596567, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e000d005a422900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027cbf.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 0e000d005a422900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596570, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='11001a00bb312b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027cca.01d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 11001a00bb312b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596573, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='140014000fae2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ccf.017c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 140014000fae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 DEBUG || write(array HeapByteBuffer@15ab53da[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596576, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, 
transactionId='0d0003003bdb2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027cd3.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=169 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,253 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3a09846] (null,[p=0,l=1431,c=8192,r=1431],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 0d0003003bdb2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 DEBUG || sendResponse info=null content=HeapByteBuffer@158da108[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,253 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1431} last=true content=HeapByteBuffer@17d4dd6d[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,253 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596579, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b001f009e072f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ce4.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Transaction 0b001f009e072f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,253 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,253 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3a09846] ([p=0,l=143,c=8192,r=143],[p=0,l=1431,c=8192,r=1431],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,253 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\njso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@17d4dd6d[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,253 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,253 DEBUG || flushed 1574 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,253 DEBUG || Flushed=true written=1574 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,253 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,254 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3a09846] ([p=143,l=143,c=8192,r=0],[p=1431,l=1431,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,254 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,254 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,254 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,254 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,254 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,254 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,254 DEBUG || complete(org.eclipse.jetty.util.Callback$3@4e45444c) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,254 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,254 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,254 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,254 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,254 DEBUG || onCompleted for /connectors/vk_nau27_src/config written=1431 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 
13:56:30,254 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_src/config HTTP/1.1" 200 1431 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,254 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,254 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,254 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,254 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,254 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,254 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,254 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,254 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,254 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,254 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,254 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,254 DEBUG || releaseRequestBuffer 
HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,254 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,254 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,254 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,254 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,254 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,254 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,254 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,254 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,254 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,254 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,254 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,254 DEBUG || releaseRequestBuffer 
HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,254 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,255 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,255 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,255 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,255 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,255 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,255 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,255 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,255 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,255 DEBUG || 
ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,253 DEBUG || COMMIT for /connectors/vk_nau27_sink/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=4} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,255 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,255 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@50889d3] (null,[p=0,l=169,c=8192,r=169],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,255 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,255 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,255 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=169} last=true content=HeapByteBuffer@158da108[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,254 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,253 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596582, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f00170038f12900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ce8.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0f00170038f12900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,255 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,255 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,255 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=5/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596585, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a001900251b3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027cec.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0a001900251b3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596588, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='10000900ae342a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027cf8.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 10000900ae342a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596591, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0600060030072a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027cfc.0118, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0600060030072a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596594, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='090005009aef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027cff.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 090005009aef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596597, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='03001c00abb22800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d0c.0104, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 03001c00abb22800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596600, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05001900e56c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d10.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 05001900e56c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596603, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='14001e0025a82b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d14.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 14001e0025a82b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596605) LogMinerEventRow{scn=289931596606, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0400180066cd2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d35.0074, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0400180066cd2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596605) LogMinerEventRow{scn=289931596608, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0400160045ce2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d42.01d8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0400160045ce2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596609, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='04001a0005d22a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.00027d4c.0130, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 04001a0005d22a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596612, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0100180096b32500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d50.0178, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0100180096b32500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596615, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='07000b00e6cf2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d55.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 07000b00e6cf2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596618, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1300140051b22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d59.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 1300140051b22b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596621, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='08000000765b2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d62.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 08000000765b2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596624, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c0009006a5b2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d66.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0c0009006a5b2b00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596627, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f0001000ff02900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d7c.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0f0001000ff02900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596630, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02001a00b5d02700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d80.0118, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 02001a00b5d02700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596633, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e000f00e24c2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d84.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0e000f00e24c2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596636, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='12001700654a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d89.01a8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 12001700654a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596639, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d001e0034d42900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d8e.0100, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0d001e0034d42900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596642, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0900150000f42f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d91.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 0900150000f42f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596645, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='11000f00582e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d9c.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 TRACE Oracle|vk_nau27|streaming Transaction 11000f00582e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596648, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='04001f006dd02a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027d9f.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 04001f006dd02a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596651, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0700150054c32d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027db4.0100, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0700150054c32d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596654, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a000a002b1b3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027db7.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0a000a002b1b3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596657, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05001f00cb712b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027dc8.019c, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 05001f00cb712b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596660, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1300080050be2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027dcc.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 1300080050be2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596663, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='140013007fb12b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027dd3.0198, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 140013007fb12b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596666, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b000a00370c2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027dd8.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0b000a00370c2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596669, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c0006004d552b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ddd.0118, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0c0006004d552b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596672, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='030018002db72800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027de1.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 030018002db72800 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596675, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='020004006bd12700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027de6.0114, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 020004006bd12700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596678, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='110006007d2d2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027de9.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 110006007d2d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596681, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='07000f004bc72d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027df0.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 07000f004bc72d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596684, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c00040065572b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027df4.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0c00040065572b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596687, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02002000a5cf2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e01.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 02002000a5cf2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596690, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f00140084e72900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e05.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0f00140084e72900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596692) LogMinerEventRow{scn=289931596693, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a0013009b193100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e2d.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0a0013009b193100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596695, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a001500e61c3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e5a.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0a001500e61c3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596698, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e001700e53e2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e5e.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0e001700e53e2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596701, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1000200006302a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e65.016c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 1000200006302a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596704, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05000c0071762b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e6a.0154, 
ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 05000c0071762b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596707, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='14001000a7b42b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e6e.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 14001000a7b42b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596710, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f000f0075f22900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e83.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0f000f0075f22900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596714, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='06001100b8052a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e88.0044, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 06001100b8052a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596717, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='09001e0036f02f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e8c.010c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 09001e0036f02f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596720, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a001d00ff1d3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027e8f.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0a001d00ff1d3100 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596723, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='080004002e5b2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ea6.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 080004002e5b2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596727, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='11000200902d2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027eb5.0070, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 11000200902d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596731, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05001e00a26f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027ed0.010c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 05001e00a26f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596735, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='12001100e24c2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027edf.01d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 12001100e24c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596739, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='14001100e1ac2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027eef.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 14001100e1ac2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596743, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='07000c0037c62d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027f05.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 07000c0037c62d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596747, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c001300295b2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027f21.006c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0c001300295b2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596751, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d001900c8dd2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027f31.0038, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0d001900c8dd2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596755, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='01000e001fbb2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027f4c.00c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 01000e001fbb2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596759, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f0012004fed2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027f61.017c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0f0012004fed2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596763, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='04000600d4cf2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027f77.0134, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 04000600d4cf2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596767, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a000900c3193100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027f8c.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0a000900c3193100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596769) LogMinerEventRow{scn=289931596771, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='050018006c6e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027fd4.0024, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 050018006c6e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596773, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05002100f1702b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00027fee.0148, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 05002100f1702b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596777, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02000f00a5cc2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028003.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 02000f00a5cc2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596781, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0300090023b62800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028013.01a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0300090023b62800 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596785, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='13000d00efb72b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028024.0018, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 13000d00efb72b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596789, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b00130046102f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028033.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0b00130046102f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596793, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e000b0060462900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002804f.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 0e000b0060462900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596795) LogMinerEventRow{scn=289931596797, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='09000c0049ea2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028073.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 09000c0049ea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596799, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='090007008eef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028094.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,256 TRACE Oracle|vk_nau27|streaming Transaction 090007008eef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596803, tableId='null', 
tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='10000200cd382a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000280a5.0018, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 10000200cd382a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596807, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='06001700910b2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000280b4.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 06001700910b2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596811, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='04001900a1d52a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000280c5.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 04001900a1d52a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596815, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='13000e0022b62b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000280d4.01ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 13000e0022b62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596819, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='080016009d622e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000280de.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 080016009d622e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596823, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='01000200afb82500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.000280ee.01e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 01000200afb82500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596827, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='110005008a2e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000280ff.0018, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 110005008a2e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596829) LogMinerEventRow{scn=289931596832, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e0006008d492900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028127.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0e0006008d492900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596833, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e001200db3d2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002813a.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0e001200db3d2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596837, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b001400590e2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002814b.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0b001400590e2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596841, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='14000400fdb12b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028161.0064, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 14000400fdb12b00 has 0 
events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596845, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f00040006ed2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028170.01d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0f00040006ed2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596849, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='12000b002e4f2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028181.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 12000b002e4f2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596853, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a000f00cc233100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028191.0018, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0a000f00cc233100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596857, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='100021008f352a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000281a1.001c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 100021008f352a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596861, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0600100028de2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000281b1.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0600100028de2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596865, tableId='null', 
tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0500200006762b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000281c6.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0500200006762b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596868, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='07000500a50b2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000281cb.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 07000500a50b2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596871, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='01000b0056b72500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000281cf.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 01000b0056b72500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596874, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d000b004bd72900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000281df.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0d000b004bd72900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596878, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='14001600a4ad2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000281fa.00d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 14001600a4ad2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596882, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='09001c003cee2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.0002820a.0014, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 09001c003cee2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596886, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f00110005ee2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002821a.0034, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0f00110005ee2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596890, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='03001500a6b42800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002822a.00bc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 03001500a6b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596894, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a00080019213100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002823a.0060, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0a00080019213100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596898, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05000e00cb6f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002824f.01d4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 05000e00cb6f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596900) LogMinerEventRow{scn=289931596901, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='04000e0035cf2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002826f.010c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 04000e0035cf2a00 has 0 
events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596903, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0400200006c82a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028284.00f4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0400200006c82a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596905) LogMinerEventRow{scn=289931596908, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b001200430a2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000282a8.006c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0b001200430a2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596905) LogMinerEventRow{scn=289931596909, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b002000240f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000282b5.00d8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0b002000240f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596910, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b002100fc0f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000282c2.00f4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0b002100fc0f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596914, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='130012009cb92b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000282d7.012c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 130012009cb92b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596918, 
tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='10000100b8352a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000282ed.0070, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 10000100b8352a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596922, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c0019000c532b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028302.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0c0019000c532b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596926, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e00010080452900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028318.01c8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0e00010080452900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596930, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0600150017072a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028328.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 0600150017072a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596934, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='08000f00ff582e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028339.00bc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 08000f00ff582e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596937, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='12001500e5472a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.0002833d.0108, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 12001500e5472a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596941, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02001900facd2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028352.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1886, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,255 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 02001900facd2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596944, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1100010046302b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028355.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 1100010046302b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596947, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='070011008bc82d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028368.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Transaction 070011008bc82d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,255 DEBUG || task 
CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,257 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,257 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,257 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,257 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,257 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,257 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,257 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=23]@2023-08-25T13:56:30.257979+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,255 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=23]@2023-08-25T13:56:30.258049+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=8/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,257 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596951, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='14001f00b2ac2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002836e.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 14001f00b2ac2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} 
task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=23]@2023-08-25T13:56:30.258112+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596954, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f000c0083f02900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002838e.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0f000c0083f02900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596956, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a0014002e223100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028390.0084, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0a0014002e223100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@50889d3] ([p=0,l=142,c=8192,r=142],[p=0,l=169,c=8192,r=169],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596959, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0500160082172b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028394.0108, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>\non\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@158da108[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,258 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:56:30.258184+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,258 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=8/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,258 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,258 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,258 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,258 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=8/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,258 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,258 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0500160082172b00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596962, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='01001d0008b92500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000283de.005c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 01001d0008b92500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596965, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='14001b0022af2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000283e1.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 14001b0022af2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596968, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c000100af562b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000283e5.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0c000100af562b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596971, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b001600990b2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000283ea.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0b001600990b2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596974, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0900040068ef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028416.0068, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0900040068ef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596977, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d000c0069d32900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028419.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0d000c0069d32900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596980, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f000b003fe82900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028430.0034, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0f000b003fe82900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596983, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02001e0028ce2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028433.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 02001e0028ce2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596986, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1100180038292b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028466.00d8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 1100180038292b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596990, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0400140031ce2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002846c.0084, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0400140031ce2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931596992) LogMinerEventRow{scn=289931596993, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='03001d00a6b52800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000284b9.0120, 
ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 03001d00a6b52800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596994, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='03000200afb12800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000284f6.0094, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 03000200afb12800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931596997, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0700160028c12d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000284f8.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0700160028c12d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597000, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='100012009e322a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000285ad.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 100012009e322a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597004, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='06001e00e8092a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000285b3.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 06001e00e8092a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597007, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='130007001ab52b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000285dd.0144, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 130007001ab52b00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597010, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a00010032233100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000285df.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0a00010032233100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597013, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c000f00eb562b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028621.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 0c000f00eb562b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597017, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='080017003c582e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028627.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 080017003c582e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597020, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02001200a2ce2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028661.005c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Transaction 02001200a2ce2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597023, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='110013001a2c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028663.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 110013001a2c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597026, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='12000600f4492a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000286b9.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 12000600f4492a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597030, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='07000600f3ce2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000286c0.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 07000600f3ce2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597033, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='01000300e5be2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002872d.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 01000300e5be2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597036, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c001a00545b2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028730.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 0c001a00545b2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597039, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b000100da0e2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000287ca.012c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 0b000100da0e2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597043, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d0018001ad52900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000287d0.007c, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 0d0018001ad52900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597046, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='03001b00f7b42800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028819.0154, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 03001b00f7b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597049, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='10001f000f352a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002881c.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 10001f000f352a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597052, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0600040013072a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028886.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 0600040013072a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597056, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='080007002c5a2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002888d.00c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 080007002c5a2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597060, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e00030010412900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028893.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 0e00030010412900 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597064, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0500110050602b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028899.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 0500110050602b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597067, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='01000900acb42500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000288c3.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 01000900acb42500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597070, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='12000400eb4c2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000288c6.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 12000400eb4c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597073, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d00100061d62900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028912.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 0d00100061d62900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597077, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02001c00cfc72700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028918.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 02001c00cfc72700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597081, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1400090055b22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002891e.00c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 1400090055b22b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597085, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='090016001ef32f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028924.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 090016001ef32f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597089, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='030003004a792800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002892a.00c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 030003004a792800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597093, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='04001d001e9b2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028930.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 04001d001e9b2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597097, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f00150084ee2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028936.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,258 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
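The AbstractLogMinerEventProcessor TRACE records above repeat the same two-record pattern per Oracle transaction: a COMMIT LogMinerEventRow (with its SCN and transactionId) followed by a "Transaction <id> has N events" line, in this window always with 0 events. A minimal sketch for summarising that pattern out of a log like this one follows; it is not part of Debezium or Kafka Connect, the script/file names are illustrative, and the regular expressions only reflect the record shapes visible in this trace.

#!/usr/bin/env python3
"""Minimal sketch (assumption: standalone helper, not part of Debezium or Kafka Connect):
summarise the AbstractLogMinerEventProcessor TRACE records found in a Connect log dump."""
import re
import sys
from collections import Counter

# "Commit (smallest SCN ...) LogMinerEventRow{scn=..., ..., transactionId='...'}" records.
COMMIT_RE = re.compile(
    r"Commit \(smallest SCN (?P<smallest>\S+)\) LogMinerEventRow\{scn=(?P<scn>\d+),.*?"
    r"transactionId='(?P<txid>[0-9a-f]+)'"
)
# Follow-up "Transaction <id> has <n> events" records.
EVENTS_RE = re.compile(r"Transaction (?P<txid>[0-9a-f]+) has (?P<events>\d+) events")

def summarise(text):
    # finditer works whether records sit one per line or are concatenated as in this dump.
    scns = [int(m.group("scn")) for m in COMMIT_RE.finditer(text)]
    events_per_tx = Counter()
    for m in EVENTS_RE.finditer(text):
        events_per_tx[m.group("txid")] += int(m.group("events"))
    return {
        "commit_records": len(scns),
        "scn_range": (min(scns), max(scns)) if scns else None,
        "transactions": len(events_per_tx),
        "empty_transactions": sum(1 for n in events_per_tx.values() if n == 0),
    }

if __name__ == "__main__":
    # Usage (hypothetical path): python summarise_logminer.py connect-debug.log
    with open(sys.argv[1], encoding="utf-8", errors="replace") as fh:
        print(summarise(fh.read()))

Run against the excerpt above, such a summary would report every listed transaction as empty (0 events), since this window contains only commit markers for the DEBEZIUM user with no captured change rows.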
2023-08-25 13:56:30,258 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=8/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,259 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,259 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=626) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,259 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384995 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,259 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1889, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=626, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,259 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], 
selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,259 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,259 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,259 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=38]@2023-08-25T13:56:30.25976+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 0f00150084ee2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,258 DEBUG || flushed 311 
SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=16/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=9} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,259 DEBUG || Flushed=true written=311 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597101, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='11000d0044282b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002893c.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Transaction 11000d0044282b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=38]@2023-08-25T13:56:30.259824+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,259 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,259 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,259 DEBUG || filled 148 
HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,259 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, 
groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,260 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384993 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,259 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=38]@2023-08-25T13:56:30.259886+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,259 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597104, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='13000400b4b62b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028980.00d4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,260 TRACE Oracle|vk_nau27|streaming Transaction 13000400b4b62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,260 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=39]@2023-08-25T13:56:30.260208+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,260 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597107, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='08001800d8602e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028982.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,260 TRACE Oracle|vk_nau27|streaming Transaction 
08001800d8602e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,259 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,260 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597110, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e0007007f472900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028a01.00b0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,260 TRACE Oracle|vk_nau27|streaming Transaction 0e0007007f472900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,260 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@50889d3] ([p=142,l=142,c=8192,r=0],[p=169,l=169,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,260 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,260 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,260 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=11} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,260 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,260 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,260 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=11} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,260 DEBUG || complete(org.eclipse.jetty.util.Callback$3@5ce40706) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,260 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,260 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,260 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,260 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE 
rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=11} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,260 DEBUG || onCompleted for /connectors/vk_nau27_sink/status written=169 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,260 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_sink/status HTTP/1.1" 200 169 "-" "ReactorNetty/1.1.6" 11 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,260 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,260 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,260 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=11} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,260 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=11} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,260 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,260 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,260 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=11} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,260 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,260 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=11} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,260 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,260 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of 
-1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=11} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,260 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=11} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,260 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,261 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,261 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,261 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,261 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,261 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,261 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,261 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,261 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,261 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE 
awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,261 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,261 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,261 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,261 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,261 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,261 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,261 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,261 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,261 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,261 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 
updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,261 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,261 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,261 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,260 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,261 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,260 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} filled 148 HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,259 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,261 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=6/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,261 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,261 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,260 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597114, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='12001000a44a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028a07.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,261 TRACE Oracle|vk_nau27|streaming Transaction 12001000a44a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,261 DEBUG || filled 150 HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} 
[org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,261 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597117, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d000100dddb2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028a41.005c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,261 TRACE Oracle|vk_nau27|streaming Transaction 0d000100dddb2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,261 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,261 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,261 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597120, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='07000d0054c72d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028a43.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,261 TRACE Oracle|vk_nau27|streaming Transaction 07000d0054c72d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,261 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,261 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931597122) LogMinerEventRow{scn=289931597124, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='09001200c8ea2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028a76.0040, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 
13:56:30,261 TRACE Oracle|vk_nau27|streaming Transaction 09001200c8ea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,261 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,261 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,261 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,261 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,261 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,261 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,261 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,261 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597126, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='09000e0090ef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028ac6.00cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,261 TRACE Oracle|vk_nau27|streaming Transaction 09000e0090ef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,261 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597130, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b001700bf092f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028acc.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,261 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,261 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:User-Agent: 
ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 0b001700bf092f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597133, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a001f0001213100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028afb.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 0a001f0001213100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597136, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='04001e0019d22a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028afe.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 04001e0019d22a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} GET //naument:8083/connectors/vk_nau27_sink/config HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597139, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='03001900b0b62800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028b47.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 03001900b0b62800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 
13:56:30,262 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597143, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='050005003e702b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028b4d.0078, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 050005003e702b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,262 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597146, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='10000300e4322a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028b7c.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 10000300e4322a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597149, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, 
transactionId='0c000e0014532b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028b7f.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 0c000e0014532b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597152, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='140021003fae2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028bc8.012c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 140021003fae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/config HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597156, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f001a002beb2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028bce.0078, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 0f001a002beb2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597159, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='08000d00975c2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028bf3.009c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 08000d00975c2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,262 DEBUG 
|| HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597162, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0600010001fc2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028bf5.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 0600010001fc2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597165, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a0003000c203100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028c30.00f4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 0a0003000c203100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597169, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02000000c0cd2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028c36.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 02000000c0cd2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE 
awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,262 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,262 DEBUG || REQUEST GET /connectors/vk_nau27_sink/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,262 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597172, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='130021001bb72b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028c5b.00b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 130021001bb72b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || scope null||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597175, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='120018007d522a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028c5d.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 120018007d522a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597178, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e001100bd432900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028c99.0058, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 0e001100bd432900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) 
LogMinerEventRow{scn=289931597182, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d00200096d82900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028c9f.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 0d00200096d82900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597185, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='030007005cb52800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028cc9.01ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Transaction 030007005cb52800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597188, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0900170099e72f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028ccc.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0900170099e72f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597191, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='11001600e9322b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028d0e.00cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 11001600e9322b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597195, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='01000d0073ba2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028d14.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 01000d0073ba2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597198, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='07000e0062c72d00', operation='COMMIT', userName='DEBEZIUM', 
rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028d34.0024, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 07000e0062c72d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || scope null||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597201, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05001000026e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028d36.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 05001000026e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,262 DEBUG || context=||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597204, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b00000020102f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028d6b.00a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,263 DEBUG || context=||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,263 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,263 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0b00000020102f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597208, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0400100099c82a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028d71.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0400100099c82a00 has 0 
events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 DEBUG || servlet ||/connectors/vk_nau27_sink/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597211, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='10001d00e92d2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028d9b.018c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 10001d00e92d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 DEBUG || servlet ||/connectors/vk_nau27_src/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597214, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c001c00244f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028d9e.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0c001c00244f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597217, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1300020099b72b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028de0.0058, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 1300020099b72b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597221, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0800090002572e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028de6.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0800090002572e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE 
Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597224, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='14000100dcb02b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028e10.01ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 14000100dcb02b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597227, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0200060081d02700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028e13.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0200060081d02700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931597229) LogMinerEventRow{scn=289931597231, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='11000b0087322b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028e44.002c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 11000b0087322b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597232, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='11001b0002242b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028e7e.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 11001b0002242b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597236, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e00180044482900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028e85.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0e00180044482900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597239, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, 
transactionId='0900010027f02f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028ec9.00f4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0900010027f02f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597242, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0400070005cf2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028ecb.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0400070005cf2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597245, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='070008002ac62d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028f30.0018, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 070008002ac62d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597249, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1200160017462a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028f36.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 1200160017462a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597252, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d0002007ace2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028f60.018c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0d0002007ace2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597255, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1300150036b52b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028f63.01e8, ssn=0, redoSql='commit;'} 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 1300150036b52b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597258, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='03001e0020b62800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028fa5.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 03001e0020b62800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597262, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e00040006452900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00028fab.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0e00040006452900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597265, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c001200525b2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029009.00ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0c001200525b2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597268, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f002000e0e52900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002900b.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0f002000e0e52900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597271, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='06001a00430a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029090.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 06001a00430a2a00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597275, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a001b00941e3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029097.00c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0a001b00941e3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597279, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='08001f00cd592e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002909d.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 08001f00cd592e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597282, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02000b007bd22700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000290e1.00f4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 02000b007bd22700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597285, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0900090038f22f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000290e3.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0900090038f22f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597288, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0400080034d02a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002914c.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 0400080034d02a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597292, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='010007004ebe2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029152.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 010007004ebe2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597295, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05000300df6c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000291df.001c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 05000300df6c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597298, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='12001b00c8462a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000291e1.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 12001b00c8462a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931597300) LogMinerEventRow{scn=289931597301, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='13000a009db32b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029238.00ec, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 13000a009db32b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597303, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='13000500a0b62b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000292c8.0074, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 13000500a0b62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597307, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='14000c00b6b02b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000292ce.007c, 
ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Transaction 14000c00b6b02b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,263 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597310, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e001a00d4432900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000292ee.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Transaction 0e001a00d4432900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597313, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='09000d0089ef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000292f0.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Transaction 09000d0089ef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597316, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b00030096082f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029325.00ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Transaction 0b00030096082f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597320, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f0007003cee2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002932b.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Transaction 0f0007003cee2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597323, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='10001100e8342a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002935a.01dc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Transaction 10001100e8342a00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597326, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a000600e8213100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002935d.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Transaction 0a000600e8213100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,264 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,264 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,264 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,264 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,264 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,264 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,264 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,264 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960990264 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,264 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,264 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 741645ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,264 DEBUG || write(array HeapByteBuffer@24ae202[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,264 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=808 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,264 DEBUG || sendResponse info=null 
content=HeapByteBuffer@4d12ac4[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,265 DEBUG || COMMIT for /connectors/vk_nau27_sink/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,265 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4501888d] (null,[p=0,l=808,c=8192,r=808],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,265 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=808} last=true content=HeapByteBuffer@4d12ac4[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,265 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,265 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,265 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4501888d] ([p=0,l=142,c=8192,r=142],[p=0,l=808,c=8192,r=808],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,265 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>\non\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@4d12ac4[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,265 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,265 DEBUG || flushed 950 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,265 DEBUG || Flushed=true written=950 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,265 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,265 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4501888d] ([p=142,l=142,c=8192,r=0],[p=808,l=808,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,265 DEBUG || onWriteComplete(true,null) 
s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,265 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,263 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,265 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,265 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,265 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,265 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,264 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597329, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d000500e1d82900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000293a6.0130, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 0d000500e1d82900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,265 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,265 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,265 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597333, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0300040036bd2800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000293ac.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING 
os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 0300040036bd2800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 DEBUG || complete(org.eclipse.jetty.util.Callback$3@87e4500) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,265 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960990265 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,265 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597337, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05000600a76f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000293b2.00c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 05000600a76f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597341, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1400020070af2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000293b8.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 1400020070af2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597345, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f001c0061f12900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000293be.00c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 0f001c0061f12900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597348, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='06000c00f80a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000293f8.0088, ssn=0, redoSql='commit;'} 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 06000c00f80a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597351, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='08001d00e05d2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000293fa.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 08001d00e05d2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597354, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1200000028452a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002945e.016c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 1200000028452a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597358, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='11001c00af2a2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029465.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 11001c00af2a2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597361, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='040001002dcf2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000294b4.0018, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 040001002dcf2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597364, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a001a009e223100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000294b6.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 0a001a009e223100 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931597366) LogMinerEventRow{scn=289931597368, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05001d00c96f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000294ea.00ec, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE Oracle|vk_nau27|streaming Transaction 05001d00c96f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,266 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597370, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05001b00d2712b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029551.00cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,266 TRACE Oracle|vk_nau27|streaming Transaction 05001b00d2712b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,266 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597374, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='010020001eba2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029557.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,266 TRACE Oracle|vk_nau27|streaming Transaction 010020001eba2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,266 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597377, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='13001a001dba2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029577.003c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,266 TRACE Oracle|vk_nau27|streaming Transaction 13001a001dba2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,266 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597380, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='14000700fcaf2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029579.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,266 TRACE Oracle|vk_nau27|streaming Transaction 14000700fcaf2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,266 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,266 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,266 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,266 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,266 DEBUG || write(array HeapByteBuffer@698e9b0b[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,266 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,266 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1551 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,266 DEBUG || sendResponse info=null content=HeapByteBuffer@2c77f809[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,266 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:56:30.266314+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,266 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=4} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,266 DEBUG || generate: NEED_HEADER for 
SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@595d1fe] (null,[p=0,l=1551,c=8192,r=1551],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,266 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1551} last=true content=HeapByteBuffer@2c77f809[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,266 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,266 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:56:30.266428+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,265 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,266 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,266 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@595d1fe] ([p=0,l=143,c=8192,r=143],[p=0,l=1551,c=8192,r=1551],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,266 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\n\x00\x00...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@2c77f809[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,266 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,266 DEBUG || flushed 1694 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=7/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=4} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,266 DEBUG || Flushed=true written=1694 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,266 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,266 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@595d1fe] ([p=143,l=143,c=8192,r=0],[p=1551,l=1551,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,266 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,266 DEBUG || 
Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,266 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=4} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,266 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,266 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,266 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,266 DEBUG || complete(org.eclipse.jetty.util.Callback$3@1d7d4d81) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,267 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,267 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,267 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,267 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,267 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks written=1551 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,267 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_src/tasks HTTP/1.1" 200 1551 "-" "ReactorNetty/1.1.6" 5 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,267 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,267 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,267 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET 
/conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,267 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,267 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,267 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,267 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,267 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,267 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,267 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,267 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,267 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,267 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,267 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,267 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,267 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 
13:56:30,267 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,267 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,267 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,267 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,267 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,266 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:56:30.266386+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,267 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,266 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597383, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='07001f00a4cf2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000295ae.006c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,267 TRACE Oracle|vk_nau27|streaming Transaction 07001f00a4cf2d00 has 0 
events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,265 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 741644ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,267 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,267 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597387, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f00190052f62900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000295b4.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,267 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,267 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,266 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,267 DEBUG || filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,266 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:56:30.266495+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,267 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,267 DEBUG || fillInterested 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,267 TRACE Oracle|vk_nau27|streaming Transaction 0f00190052f62900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,267 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,267 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597390, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e001d0099452900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000295d9.00a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,267 TRACE Oracle|vk_nau27|streaming Transaction 0e001d0099452900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,267 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597393, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d000f0025d72900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000295db.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,267 TRACE Oracle|vk_nau27|streaming Transaction 0d000f0025d72900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,267 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597396, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='09000b0085ea2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029616.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,267 TRACE Oracle|vk_nau27|streaming Transaction 09000b0085ea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,267 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597400, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b000f00ed0e2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', 
rollbackFlag=false, rsId=0x0098e8.0002961d.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0b000f00ed0e2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597403, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='030001004cb42800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029647.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 030001004cb42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597406, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='10001000f4322a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002964a.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 10001000f4322a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597409, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0c002000b8582b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002968c.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0c002000b8582b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597413, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='04000500b8cf2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029693.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 04000500b8cf2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597416, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='06000f00dc082a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000296c2.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 
06000f00dc082a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597419, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='13001e00a7b52b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000296c5.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 13001e00a7b52b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597422, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e001000733e2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002970e.012c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0e001000733e2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597426, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='01000c008db12500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029714.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 01000c008db12500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597429, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='08000a00f8592e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002979c.0064, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 08000a00f8592e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597432, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a0002003c1a3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002979e.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0a0002003c1a3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597435, 
tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='02001000e2d72700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002985a.0058, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 02001000e2d72700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597439, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0b000400aa102f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029860.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0b000400aa102f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597442, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='10000d0095302a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002987b.0038, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 10000d0095302a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597445, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='09001a00b6ee2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002987d.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 09001a00b6ee2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597448, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='11001700632e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000298ab.0084, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 11001700632e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597452, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='04001700d1cd2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.000298b1.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 04001700d1cd2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597456, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='12000200764a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000298b7.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 12000200764a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597460, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='13001d00cc922b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000298bd.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 13001d00cc922b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597463, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0500150021712b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000298dd.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0500150021712b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597466, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1400150081ae2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.000298df.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 1400150081ae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597469, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0d00130058d22900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002991d.00d8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0d00130058d22900 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597473, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0f00090014ee2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029923.007c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0f00090014ee2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597476, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='060002007b0a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029981.00c8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 060002007b0a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597479, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0100120093b42500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029983.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0100120093b42500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597482, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0a0021000a1d3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029a09.0028, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0a0021000a1d3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597486, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0e000a0067492900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029a0f.00c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0e000a0067492900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597489, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='090019009ee92f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029ab0.01e0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 090019009ee92f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597492, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='0300170024b62800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029ab3.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0300170024b62800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931597494) LogMinerEventRow{scn=289931597495, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='080021005e602e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029af8.00c4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 080021005e602e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597497, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='08001200fb5f2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029bb6.0138, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 08001200fb5f2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597500, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='05001400c36e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029bbb.0024, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 05001400c36e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597503, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='07001b00ddca2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029bbe.00f0, 
ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 07001b00ddca2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597506, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:21Z, transactionId='1200210093492a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029be2.0034, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 1200210093492a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597509, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:22Z, transactionId='0b000d00df0b2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029be6.0178, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0b000d00df0b2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597512, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:22Z, transactionId='0c00030001542b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029bea.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0c00030001542b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597515, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:22Z, transactionId='14001d00deb42b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029c28.00d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 14001d00deb42b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597518, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:22Z, transactionId='0f000e001cf22900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029c2c.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0f000e001cf22900 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597521, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:22Z, transactionId='10001a00bf2d2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029c30.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 10001a00bf2d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597524, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:22Z, transactionId='0d00070012da2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029c54.0064, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0d00070012da2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597527, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:22Z, transactionId='06000700110a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029c58.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 06000700110a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597530, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:22Z, transactionId='0a000d00bd1e3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029c5c.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0a000d00bd1e3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597533, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:22Z, transactionId='030005003cb02800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029c7a.00cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 030005003cb02800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597536, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:23Z, transactionId='040021004bd12a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029c7e.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 040021004bd12a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597539, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:23Z, transactionId='08000100c35b2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029c82.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 08000100c35b2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597542, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:23Z, transactionId='12000a006e4c2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cac.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 12000a006e4c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597547, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='1300030037b82b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cb1.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 1300030037b82b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597550, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='050002006a6d2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cb5.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 050002006a6d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597553, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='14001c00a2a92b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cc0.01c0, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 14001c00a2a92b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597556, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='02000100fac42700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cc5.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 02000100fac42700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597559, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0d000a006ad42900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cca.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 0d000a006ad42900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597562, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='01001300c4b62500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cce.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,268 TRACE Oracle|vk_nau27|streaming Transaction 01001300c4b62500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597565, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='03000c0080b32800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cdf.0188, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 03000c0080b32800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597568, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0f001f002aec2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029ce3.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0f001f002aec2900 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597571, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='1100190005312b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cf5.0034, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 1100190005312b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597574, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0a002000c51b3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cf9.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0a002000c51b3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597577, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0e000800d3432900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029cfd.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0e000800d3432900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597580, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='05000b003d742b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d06.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 05000b003d742b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597583, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='08000500295c2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d0b.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 08000500295c2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597586, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0900000025ed2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d0f.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0900000025ed2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931597588) LogMinerEventRow{scn=289931597589, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0b001500100c2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d33.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0b001500100c2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597591, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0b000600cc0f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d41.0068, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0b000600cc0f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597594, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0700040097cc2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d45.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0700040097cc2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597597, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='040002008ccc2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d4a.0178, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 040002008ccc2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597600, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='12001a0018482a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d4e.00f0, 
ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 12001a0018482a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597603, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='14000b0011b02b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d5d.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 14000b0011b02b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597606, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='130001008cb52b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d61.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 130001008cb52b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597609, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0d001c00ddd12900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d6d.0044, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0d001c00ddd12900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597612, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='03000b0031b42800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d71.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 03000b0031b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597615, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='08000c00455d2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d75.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 08000c00455d2e00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931597617) LogMinerEventRow{scn=289931597618, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='12001200124b2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029d98.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 12001200124b2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597620, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='12000e0028472a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029dcd.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 12000e0028472a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597623, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0e001900eb432900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029dd2.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0e001900eb432900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597626, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0d000900b5dc2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029dd6.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0d000900b5dc2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597629, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='030016003ab32800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029df1.0154, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 030016003ab32800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597632, tableId='null', 
tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='08001100445c2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029df5.01a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 08001100445c2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597635, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0c000d00b1582b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029df9.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0c000d00b1582b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597638, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='12001f00c9462a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e2a.016c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 12001f00c9462a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597641, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='020002003acc2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e2f.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 020002003acc2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597644, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='11000300012e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e33.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 11000300012e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597647, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0f002100c7ec2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, 
rsId=0x0098e8.00029e42.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0f002100c7ec2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597650, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='07001c00a3cf2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e47.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 07001c00a3cf2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597653, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0d001200ecd72900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e4b.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0d001200ecd72900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597656, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='10000700b5332a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e60.01dc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 10000700b5332a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597659, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0900020050ea2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e65.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0900020050ea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597662, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0a000b001a213100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e69.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0a000b001a213100 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597665, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:24Z, transactionId='0400090013d42a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e78.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0400090013d42a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597668, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c001e00505c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e7d.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0c001e00505c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597671, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='020016007acc2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e81.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 020016007acc2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597674, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='06001900ea0d2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e93.0068, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 06001900ea0d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597677, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='130013004faf2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e97.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 130013004faf2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597680, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='05001c00bf702b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029e9b.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 05001c00bf702b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597683, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='14001a0071b12b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029eaa.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 14001a0071b12b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597686, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='03000e0075b62800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029eaf.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 03000e0075b62800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597689, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='11000000e8312b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029eb3.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 11000000e8312b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597692, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='07001a00c3c72d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029ec0.01a8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 07001a00c3c72d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597695, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f000300cdf02900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029ec5.017c, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0f000300cdf02900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597698, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c001000d8542b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029ec9.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0c001000d8542b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931597700) LogMinerEventRow{scn=289931597701, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a001000f81a3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029ef9.0114, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0a001000f81a3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597702, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a001e0082173100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f06.01bc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0a001e0082173100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597705, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='08001000655f2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f0b.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 08001000655f2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597708, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='01001f009ab92500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f0f.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 01001f009ab92500 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597711, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0200180005ce2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f1f.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,269 TRACE Oracle|vk_nau27|streaming Transaction 0200180005ce2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597714, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='12000f00bf4c2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f23.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 12000f00bf4c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597717, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='05001a0015742b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f27.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 05001a0015742b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597720, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0e00200020412900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f41.0050, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0e00200020412900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597723, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='14000d00b6ae2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f45.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 14000d00b6ae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597726, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='11001f00e3302b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f49.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 11001f00e3302b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597729, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0700000015cb2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f5e.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0700000015cb2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597732, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c002100a75a2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f63.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0c002100a75a2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597734, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0b000200440a2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f66.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0b000200440a2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597737, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f00060065f82900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f71.018c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0f00060065f82900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597740, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0d000800d4d62900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f76.014c, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0d000800d4d62900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597743, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='09000a00719f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f7a.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 09000a00719f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931597745) LogMinerEventRow{scn=289931597746, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a0011004d193100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029f9c.0068, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0a0011004d193100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597748, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a0016009a1d3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029faa.00cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0a0016009a1d3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597751, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='03000800cfaf2800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029faf.0040, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 03000800cfaf2800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597754, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='04001b0008c52a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029fb2.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 04001b0008c52a00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597757, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='05000f0033762b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029fc3.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 05000f0033762b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597760, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='10000000bb362a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029fc8.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 10000000bb362a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597763, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='1400200061b12b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.00029fcc.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 1400200061b12b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597766, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='08001900e9552e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a002.00b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 08001900e9552e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597769, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='130020004fb42b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a006.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 130020004fb42b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597772, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='12000100b1492a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a00a.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 12000100b1492a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597775, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0e001c00e53f2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a019.013c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0e001c00e53f2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597778, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='06000a0067042a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a01d.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 06000a0067042a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597781, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='02000700b5d22700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a021.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 02000700b5d22700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597784, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0100140056b52500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a03a.0180, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0100140056b52500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597787, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='11000700c32d2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a03f.014c, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 11000700c32d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597790, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='07001400dcc92d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a043.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 07001400dcc92d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597793, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0d001d008cd82900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a060.009c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0d001d008cd82900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597796, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='030013004bae2800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a064.0108, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 030013004bae2800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597799, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f001e004bea2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a067.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0f001e004bea2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597802, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='09000600d4ef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a0b1.005c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 09000600d4ef2f00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597805, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='04000a0031862a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a0bc.00e0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 04000a0031862a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597808, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a000400511f3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a0c9.0030, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0a000400511f3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597811, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='08001e00bd612e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a0d4.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 08001e00bd612e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597814, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c001d00b5542b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a0df.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 0c001d00b5542b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597817, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='1200140036432a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a0ea.010c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Transaction 1200140036432a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,267 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE 
awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,270 DEBUG || onCompleted for /connectors/vk_nau27_sink/config written=808 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,270 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_sink/config HTTP/1.1" 200 808 "-" "ReactorNetty/1.1.6" 8 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,270 DEBUG || filled 157 HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,270 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=8} filled 157 HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,270 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=8} parse HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,270 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,270 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,270 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=8} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,270 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,270 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=8} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,271 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,271 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=9} parsed false 
HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,271 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,271 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,271 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,271 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,271 DEBUG || 
HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,271 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,271 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks/0/status HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,271 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,271 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,271 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,271 DEBUG || scope null||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,267 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,271 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,271 DEBUG || context=||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,270 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597820, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0d001a00c4d62900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a0f4.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,271 TRACE Oracle|vk_nau27|streaming Transaction 0d001a00c4d62900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,271 DEBUG || Key interests updated 1 -> 0 on 
SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,267 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,267 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,271 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597823, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0200150040ce2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a100.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,271 TRACE Oracle|vk_nau27|streaming Transaction 0200150040ce2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597826, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='1300190053af2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a10d.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 1300190053af2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597829, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='11000a00112d2b00', operation='COMMIT', 
userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a118.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 11000a00112d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597832, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0b001e00830d2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a124.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 0b001e00830d2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597835, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0e000200d13f2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a12f.0164, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 0e000200d13f2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597838, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='03000f00d0bb2800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a13a.016c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 03000f00d0bb2800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597841, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='05001200006f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a146.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 05001200006f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597844, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='10000f0063332a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a153.0028, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE 
Oracle|vk_nau27|streaming Transaction 10000f0063332a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597847, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0700010038c52d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a15d.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 0700010038c52d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931597849) LogMinerEventRow{scn=289931597851, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c00070006522b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a177.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 0c00070006522b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597852, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c001f0006572b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a190.008c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 0c001f0006572b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597855, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0800060027592e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a19a.01c4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 0800060027592e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597858, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='02001f000ece2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a1a6.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 02001f000ece2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 
null) LogMinerEventRow{scn=289931597861, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='06001600dc062a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a1b1.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 06001600dc062a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597864, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='1400080045b12b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a1bc.01c8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 1400080045b12b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597867, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f0010008aed2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a1c7.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 0f0010008aed2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597870, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='11000800772e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a1d3.00d4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 11000800772e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597873, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='09000800e2ea2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a1dd.0130, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 09000800e2ea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597876, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a00170004193100', operation='COMMIT', userName='DEBEZIUM', 
rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a1e7.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 0a00170004193100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597878, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='12001c00004f2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a1f1.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 12001c00004f2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597881, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0100040055b72500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a1fd.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 0100040055b72500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,271 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,271 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597884, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0d001100bbd92900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a208.0130, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,271 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,272 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,271 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=4/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,272 DEBUG || servlet ||/connectors/vk_nau27_src/tasks/0/status -> 
org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,272 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,272 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Transaction 0d001100bbd92900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,272 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,272 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,272 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,272 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,272 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,272 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,272 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,272 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,272 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=4/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,272 DEBUG || ReservedThread@2da5db67{RUNNING,thread=Thread[qtp974320615-176,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,272 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,272 DEBUG || ReservedThread@2da5db67{RESERVED,thread=Thread[qtp974320615-176,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,273 DEBUG || write(array HeapByteBuffer@5df1ecff[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,273 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,274 DEBUG || sendResponse info=null 
content=HeapByteBuffer@507b7a8f[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,274 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,274 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@19f7c5ed] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,274 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@507b7a8f[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,274 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,274 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,274 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@19f7c5ed] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,274 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>\n\non\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@507b7a8f[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,274 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,272 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=4/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,272 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,274 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,274 DEBUG || flushed 198 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true 
al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,272 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597887, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='03001200cdb02800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a212.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 03001200cdb02800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,274 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597890, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0b000c00ec0b2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a21d.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 0b000c00ec0b2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597893, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='10001b00cb382a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a229.01c8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 10001b00cb382a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597896, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0700100085c62d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a234.0130, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 0700100085c62d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,274 DEBUG || HEADER:Accept: application/json --> FIELD 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,274 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,274 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597899, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='040013004bcd2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a23e.0144, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 040013004bcd2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@19f7c5ed] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,274 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597902, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c000a0083512b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a249.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 0c000a0083512b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,274 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=6/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597905, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0800080084552e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a257.0034, ssn=0, redoSql='commit;'} 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 0800080084552e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=6/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,274 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,274 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,274 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,274 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,274 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597908, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='12000700964c2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a261.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 12000700964c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,274 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597911, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='13001c00c5b62b00', operation='COMMIT', 
userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a26c.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,274 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 13001c00c5b62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || scope null||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597914, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0500170052732b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a277.013c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 0500170052732b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || complete(org.eclipse.jetty.util.Callback$3@635bd596) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597917, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='14001800d0aa2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a281.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 14001800d0aa2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || context=||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597920, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f000d0039f02900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a28c.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Transaction 0f000d0039f02900 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,274 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597923, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a001800911b3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a297.01bc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 0a001800911b3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,274 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,275 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597926, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='06001300d6022a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a2a2.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 06001300d6022a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597929, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='010010002db72500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a2ae.01cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 010010002db72500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,275 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,275 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,275 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597932, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0d00210094d52900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a2b9.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 0d00210094d52900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597934, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='02000800fece2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a2c4.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 02000800fece2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597937, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0b0007001d112f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a2cf.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 0b0007001d112f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597939, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='1000060074332a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a2d9.0024, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 1000060074332a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} 
[org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597941, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='11001000d02b2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a2e3.0020, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 11001000d02b2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597945, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0700030075ba2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a2ee.01b0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 0700030075ba2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597947, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0500070010742b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a2fd.00cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 0500070010742b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597949, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='06000b001dfd2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a306.0070, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 06000b001dfd2900 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,275 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597951, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c00160022572b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a325.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=7/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 0c00160022572b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,275 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597954, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0e000900943f2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a328.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,275 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,275 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 0e000900943f2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597957, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0200090065d12700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a356.006c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 0200090065d12700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597960, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='03000d001bb32800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a376.0038, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 03000d001bb32800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:56:30.275617+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597963, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0800200026552e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a378.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 0800200026552e00 has 0 
events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597966, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='11001200ed352b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a3a7.00b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 11001200ed352b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,275 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:56:30.275701+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,275 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597970, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='01001e0088ba2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a3bd.0170, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || ReservedThread@2da5db67{RESERVED,thread=Thread[qtp974320615-176,5,main]} 
task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:56:30.275761+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,275 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,275 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,275 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Transaction 01001e0088ba2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,275 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,275 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:56:30.275853+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,275 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,275 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,275 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597973, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='140000001db22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a3c2.016c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 140000001db22b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable enter 
HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597975, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='090013004cf12f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a3c6.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 090013004cf12f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597978, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f000a008cf42900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a3db.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0f000a008cf42900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597981, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a000e00b41c3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a3fb.0098, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0a000e00b41c3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597984, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='050009005d772b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a400.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 050009005d772b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597987, tableId='null', tableName='null', tablespaceName='null', 
eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='04000b006cd02a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a40b.0178, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 04000b006cd02a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597991, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='12000500b64f2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a412.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 12000500b64f2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597995, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0b000b00f80f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a419.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0b000b00f80f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931597999, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='07000a009ec82d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a420.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 07000a009ec82d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598003, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='10000a00e2372a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a42a.012c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 10000a00e2372a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598007, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c001700e4582b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a433.019c, ssn=0, redoSql='commit;'} 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0c001700e4582b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598011, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='06000500b1052a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a43a.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 06000500b1052a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598015, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0d0004004cd12900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a441.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0d0004004cd12900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598019, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0200140058cc2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a448.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0200140058cc2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598023, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='11001500592b2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a44f.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 11001500592b2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598027, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='13001b00c3a42b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a456.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 13001b00c3a42b00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598031, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0300110010b02800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a45f.00f8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0300110010b02800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598035, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='01000f0044b82500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a465.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 01000f0044b82500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598039, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0b001b00b70c2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a46e.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0b001b00b70c2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598043, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0e00000017442900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a474.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0e00000017442900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598047, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='08001c0033562e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a47c.0014, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 08001c0033562e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598051, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='14000a0059ab2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a482.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 14000a0059ab2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598055, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='10001e00ed342a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a489.01bc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 10001e00ed342a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598059, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0600080015fc2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a490.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0600080015fc2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598063, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='01001900b4ba2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a499.00ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 01001900b4ba2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598067, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='07000700f7c82d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4a2.0164, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 07000700f7c82d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598071, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c000500475c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4a9.01c0, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0c000500475c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598075, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f001600f0f52900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4b0.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0f001600f0f52900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598079, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a000700211e3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4b7.01bc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0a000700211e3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598083, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='02000c00a4cd2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4be.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 02000c00a4cd2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598087, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='05000a006d6f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4c5.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 05000a006d6f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598091, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='110009007d342b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4ce.003c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 110009007d342b00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598095, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0700020082cc2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4d6.00ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0700020082cc2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598099, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='14000e008eae2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4de.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 14000e008eae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598103, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c000c007e5a2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4e6.00b0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0c000c007e5a2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598107, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0b001c00f20e2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4ec.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0b001c00f20e2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598110, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='10001c002f332a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4f2.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 10001c002f332a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598113, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='09000300b5ed2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a4fa.00ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 09000300b5ed2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598116, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='1200080025472a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a500.00f8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 1200080025472a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598119, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0d00170097d92900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a507.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0d00170097d92900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598123, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='060021003c062a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a50e.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 060021003c062a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598126, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='01000500e4b22500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a513.012c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 01000500e4b22500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598128, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0b001000ce102f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a516.00e0, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0b001000ce102f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598131, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f00180018ec2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a52b.01ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 0f00180018ec2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598134, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='100018006c332a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a530.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 100018006c332a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598137, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='06001c001e0e2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a535.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 06001c001e0e2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598140, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='03000600d6b42800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a53a.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 03000600d6b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598143, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='040012009bcc2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a53d.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 TRACE Oracle|vk_nau27|streaming Transaction 040012009bcc2a00 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598145, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='13000c001db62b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a560.0024, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 13000c001db62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598148, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='080003009a032e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a563.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 080003009a032e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598151, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='01000a0057be2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a590.0148, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 01000a0057be2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598154, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='12001900154d2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a593.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 12001900154d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598157, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a000000cf1a3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a5a2.00f4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0a000000cf1a3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598159, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='05000000c5742b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a5a6.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 05000000c5742b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598161, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='140006003bab2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a5a9.009c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 140006003bab2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598163, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0b001a00160c2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a5ba.0050, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0b001a00160c2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598166, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f00000095f32900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a5be.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0f00000095f32900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598169, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0e001e00023f2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a5c3.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0e001e00023f2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598172, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='02001b00f3d12700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a5c7.00e4, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 02001b00f3d12700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598174, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a000500991c3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a5f2.0040, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0a000500991c3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598176, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='05000800b4732b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a5f4.01e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 05000800b4732b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598178, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0d00160005d82900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a5f8.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0d00160005d82900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598180, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='1400120034ad2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a62f.0190, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 1400120034ad2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598183, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='030000005cb62800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a634.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 030000005cb62800 has 0 events 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598186, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='10001600aa382a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a638.0184, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 10001600aa382a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598189, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f001b0018ef2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a63c.0154, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0f001b0018ef2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598192, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='080014006f582e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a641.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 080014006f582e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598195, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a000c003e033100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a667.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0a000c003e033100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598198, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='060014006d0a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a66b.01bc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 060014006d0a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598201, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='09001f00b7ed2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a66f.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 09001f00b7ed2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598204, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0100080055b52500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a69e.0078, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0100080055b52500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598207, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0500040064752b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a6a2.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0500040064752b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598210, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='110011004e2e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a6a5.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 110011004e2e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598213, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='1400170076b52b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a6b3.0098, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 1400170076b52b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598216, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='04000f00d5d02a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a6b7.0158, ssn=0, 
redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 04000f00d5d02a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598219, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0f00130066ef2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a6bb.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0f00130066ef2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,275 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,275 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,277 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598221, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='070013006fc82d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a6ce.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 070013006fc82d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598224, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0c000000ad532b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a6d1.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 0c000000ad532b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,276 DEBUG || filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,277 TRACE 
Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598227, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='02000300eece2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a6e2.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Transaction 02000300eece2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,277 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,277 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,277 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,277 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,277 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,277 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || HEADER:User-Agent: 
ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,277 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,278 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} GET //naument:8083/connectors/vk_nau27_src/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,278 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,278 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,278 DEBUG || handle //naument:8083/connectors/vk_nau27_src/topics HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,278 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,278 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,278 DEBUG || REQUEST GET /connectors/vk_nau27_src/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,278 DEBUG || scope null||/connectors/vk_nau27_src/topics @ 
o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,278 DEBUG || context=||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,278 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,278 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,278 DEBUG || servlet ||/connectors/vk_nau27_src/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,278 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,278 DEBUG || write(array HeapByteBuffer@6714490d[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,278 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=929 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,278 DEBUG || sendResponse info=null content=HeapByteBuffer@43d40f05[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,278 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,278 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4388c35] (null,[p=0,l=929,c=8192,r=929],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,278 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=929} last=true content=HeapByteBuffer@43d40f05[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,278 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,278 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,278 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4388c35] ([p=0,l=142,c=8192,r=142],[p=0,l=929,c=8192,r=929],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,278 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null 
[HeapByteBuffer@360c19be[p=0,l=142,c=8192,r=142]={<<>>cation/js...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@43d40f05[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,278 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,279 DEBUG || flushed 1071 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=11/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=5} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,277 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,279 DEBUG || Flushed=true written=1071 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,279 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,279 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,279 DEBUG || write(array HeapByteBuffer@7cc7d910[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,279 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=85 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,279 DEBUG || sendResponse info=null content=HeapByteBuffer@4dc94c50[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,279 DEBUG || COMMIT for /connectors/vk_nau27_src/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,279 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4aac9f07] (null,[p=0,l=85,c=8192,r=85],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,279 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=85} last=true 
content=HeapByteBuffer@4dc94c50[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,279 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,279 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,279 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4aac9f07] ([p=0,l=141,c=8192,r=141],[p=0,l=85,c=8192,r=85],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,279 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@4dc94c50[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,279 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,279 DEBUG || flushed 226 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,279 DEBUG || Flushed=true written=226 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,279 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,279 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4aac9f07] ([p=141,l=141,c=8192,r=0],[p=85,l=85,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,279 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,279 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,279 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=13,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,279 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,279 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,279 DEBUG || action COMPLETE 
HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,279 DEBUG || complete(org.eclipse.jetty.util.Callback$3@2eafc53a) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,279 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,279 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,279 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,279 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,279 DEBUG || onCompleted for /connectors/vk_nau27_src/topics written=85 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,277 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960990277 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,279 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,279 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,277 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,277 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598230, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='11000c006a2a2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a707.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,279 TRACE Oracle|vk_nau27|streaming Transaction 11000c006a2a2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,279 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 741630ms or 
until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:30,279 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,279 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,279 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,279 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598233, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='1200200080492a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a70a.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 1200200080492a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4388c35] ([p=142,l=142,c=8192,r=0],[p=929,l=929,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,279 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598236, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0b000900280f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a740.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,280 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,280 DEBUG || fillInterested 
HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0b000900280f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598239, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='0a001200a4233100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a745.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0a001200a4233100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598243, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='13001800b8b62b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a74b.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 13001800b8b62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598246, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:25Z, transactionId='10000400d6382a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a770.0018, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 10000400d6382a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598249, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0e001500fe482900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a790.00b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0e001500fe482900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598252, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='09002000f6f82f00', 
operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a793.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 09002000f6f82f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598255, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0d00140014da2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a7c1.01d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0d00140014da2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598259, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='05001300366f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a7c7.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 05001300366f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598262, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='06000e00a7072a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a7cb.01ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 06000e00a7072a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598265, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='070012001cca2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a7ed.0020, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 070012001cca2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598268, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='140019009cb32b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a7f1.013c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 
2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 140019009cb32b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598271, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='03002000ffb82800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a7f4.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 03002000ffb82800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598274, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='01001500b1b92500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a80a.0014, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 01001500b1b92500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598277, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0f001d0071f02900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a82e.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0f001d0071f02900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598280, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0a001c00dd1f3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a830.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0a001c00dd1f3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598283, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='05000d00f1752b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a835.00f8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 05000d00f1752b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming 
Commit (smallest SCN null) LogMinerEventRow{scn=289931598286, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0800130070582e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a867.01a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0800130070582e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598289, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='12000900844a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a86a.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 12000900844a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598292, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0c001400e8502b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a900.017c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0c001400e8502b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598295, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='04000c00c5ce2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a903.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 04000c00c5ce2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598298, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0200170048d22700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a941.00f4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0200170048d22700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598301, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='13000600abae2b00', operation='COMMIT', 
userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a943.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 13000600abae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598303) LogMinerEventRow{scn=289931598304, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='11002100b3322b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a948.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 11002100b3322b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598303) LogMinerEventRow{scn=289931598307, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0e001600a3422900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a94b.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0e001600a3422900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598310, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0d000e00b8d12900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a9a2.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0d000e00b8d12900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598313, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='03001000bab82800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a9a5.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 03001000bab82800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598315) LogMinerEventRow{scn=289931598316, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0700210039c32d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a9aa.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 
2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0700210039c32d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598315) LogMinerEventRow{scn=289931598319, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0c000800cd5c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002a9ad.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0c000800cd5c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598322, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='08000200b85b2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002aa3b.01d4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 08000200b85b2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598325, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='09001100bff32f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002aa3e.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 09001100bff32f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598328, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0b001800180d2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002aa9b.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0b001800180d2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598331, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='02001100faca2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002aa9d.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 02001100faca2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE 
Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598334, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='10000e005d362a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002aae3.00cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 10000e005d362a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598337, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='14000f0022ae2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002aae5.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 14000f0022ae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598340, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='060018003b0b2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ab56.013c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 060018003b0b2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598343, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='110004005d2b2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ab5a.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 110004005d2b2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598346, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='070018009acc2d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ab5d.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 070018009acc2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598349, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0f000800b0ef2900', 
operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002aba1.01c8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0f000800b0ef2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598352, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='04000000b5cc2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002aba4.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 04000000b5cc2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598355, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='1300110008b32b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002abe7.01d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 1300110008b32b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598358, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0e001300fe422900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002abea.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0e001300fe422900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598360) LogMinerEventRow{scn=289931598361, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0a001900261b3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002abef.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 TRACE Oracle|vk_nau27|streaming Transaction 0a001900261b3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598364, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0c0018008d5a2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ac36.01e0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 
2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 0c0018008d5a2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598367, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0100160013be2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ac3b.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 0100160013be2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598370, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='05000100f16e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ac3e.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 05000100f16e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598373, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='09001800e3ef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ac72.008c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 09001800e3ef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598376, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0400030053d62a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ac74.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 0400030053d62a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598378) LogMinerEventRow{scn=289931598379, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0200050046cd2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ac79.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 0200050046cd2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE 
Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598382, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='1400030095ad2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002acb8.0030, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 1400030095ad2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598385, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0f0005002cf02900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002acbc.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 0f0005002cf02900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598388, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0a000a002c1b3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002acbf.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 0a000a002c1b3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598391, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='1300170045bb2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002acf8.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 1300170045bb2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598394, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='11001400c92c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002acfb.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 11001400c92c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598397, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='070017006bc72d00', 
operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ad2e.00f8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 070017006bc72d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598400, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0b001900a80d2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ad30.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 0b001900a80d2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,280 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,281 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,281 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=7} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598403, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='12000c00e34c2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ad6f.00a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 12000c00e34c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,280 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,281 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598406, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0c00150091532b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ad71.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,281 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,281 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,281 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,281 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,281 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 0c00150091532b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 DEBUG || complete(org.eclipse.jetty.util.Callback$3@4badbf2) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,281 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,281 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,281 DEBUG || 
CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,281 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,281 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,281 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,281 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598409, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='02000e0059c92700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002adaf.00a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,281 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,281 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,281 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,281 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,281 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,281 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,281 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks written=929 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 02000e0059c92700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} parsed false 
HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,281 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_sink/tasks HTTP/1.1" 200 929 "-" "ReactorNetty/1.1.6" 7 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,281 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,281 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,281 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598412, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='1000130025372a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002adb1.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 TRACE Oracle|vk_nau27|streaming Transaction 1000130025372a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,281 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,281 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=7} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,281 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,282 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,282 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=8} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,281 TRACE 
Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598415, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='05001900e66c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ae09.0124, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,282 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,282 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,282 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,282 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,282 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,282 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,282 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,282 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 05001900e66c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,282 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,282 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598418, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='06000900acfe2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ae0b.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 06000900acfe2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], 
selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598420) LogMinerEventRow{scn=289931598421, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0e000500dc472900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ae10.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 0e000500dc472900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=8} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,282 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,282 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,282 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,282 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598424, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0100000046bb2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ae53.0178, ssn=0, redoSql='commit;'} 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 0100000046bb2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=8} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,282 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,282 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=39]@2023-08-25T13:56:30.282427+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598427, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='11001d00f9312b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ae56.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 11001d00f9312b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598429) LogMinerEventRow{scn=289931598430, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='07001e0077b82d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ae5b.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 07001e0077b82d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598429) LogMinerEventRow{scn=289931598433, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0d0006002ad92900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', 
rollbackFlag=false, rsId=0x0098e8.0002ae5e.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=8} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,282 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=39]@2023-08-25T13:56:30.282508+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,282 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 0d0006002ad92900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=39]@2023-08-25T13:56:30.282539+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,282 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,282 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=40]@2023-08-25T13:56:30.282709+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,282 TRACE 
Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598436, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0900210032f02f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002aeda.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 0900210032f02f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598439, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='03000a005ab22800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002aedd.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 03000a005ab22800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598442, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='1400050097ad2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002af39.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 1400050097ad2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598445, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='08000e00335d2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002af3c.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 08000e00335d2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598448, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='12000d00e74a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002afdb.00a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 12000d00e74a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598451, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0f00020090f22900', 
operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002afdd.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 0f00020090f22900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598454, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0c00110053582b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b010.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 0c00110053582b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598457, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0a0013009c193100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b013.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 0a0013009c193100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598460, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='02001d0035d22700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b057.00cc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 02001d0035d22700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598463, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='0b000500460f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b059.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 0b000500460f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598466, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:26Z, transactionId='04000d0061d12a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b0b5.00dc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 
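The TRACE lines above show LogMiner COMMIT rows for transactions whose buffers hold 0 events: none of their DML touched a table that passes the connector's table filters, so the commit is acknowledged and discarded. A hypothetical sketch of how such a filtered Debezium Oracle source connector could be registered through the Connect REST API follows (Python 3 with the `requests` package assumed). The worker URL is taken from the log; the connector name, credentials, database coordinates and include pattern are placeholders, and exact property names can vary between Debezium versions.

```python
# Hypothetical sketch: register a Debezium Oracle source connector whose
# table.include.list limits capture. Transactions touching only tables outside
# the list still show up in the LogMiner stream, but their buffered event count
# stays 0 and the COMMIT is simply discarded, as the TRACE lines above show.
import requests

CONNECT_URL = "http://naument:8083"      # Connect worker seen in the log
CONNECTOR_NAME = "vk_nau27_src"          # illustrative name only

config = {
    "connector.class": "io.debezium.connector.oracle.OracleConnector",
    "database.hostname": "oracle.example.com",   # placeholder
    "database.port": "1521",
    "database.user": "debezium",                 # placeholder
    "database.password": "********",             # placeholder
    "database.dbname": "ORCLCDB",                # placeholder
    "topic.prefix": "vk_nau27",
    # Only tables matching this pattern are emitted; everything else
    # (e.g. NAUSD40.TBL_SYS_SYSTEM_CHECK below) is skipped by the filter.
    "table.include.list": "NAUSD40\\.TBL_SERVICECALL_NC27",
    "log.mining.strategy": "online_catalog",
}

# PUT /connectors/{name}/config creates the connector or updates its config.
resp = requests.put(f"{CONNECT_URL}/connectors/{CONNECTOR_NAME}/config", json=config)
resp.raise_for_status()
print(resp.json()["name"], "registered")
```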
2023-08-25 13:56:30,282 TRACE Oracle|vk_nau27|streaming Transaction 04000d0061d12a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,283 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SYSTEM_CHECK' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,283 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598471, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:28Z, transactionId='1000170026312a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b0b8.00ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,283 TRACE Oracle|vk_nau27|streaming Transaction 1000170026312a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,283 DEBUG Oracle|vk_nau27|streaming Counters{rows=1905, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=901, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,283 DEBUG Oracle|vk_nau27|streaming Processed in 43 ms. Lag: 2283. Offset SCN: 289931595527, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931598471, txIds=[1000170026312a00]}}], Active Transactions: 0, Sleep: 3000 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:30,283 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:30,282 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,282 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=8} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,282 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,283 DEBUG || 
recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,283 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,282 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,283 DEBUG || filled 158 HeapByteBuffer@1d18b05f[p=0,l=158,c=8192,r=158]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,283 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,283 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,283 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,283 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 158 
HeapByteBuffer@1d18b05f[p=0,l=158,c=8192,r=158]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,283 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,283 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,283 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=158,c=8192,r=158]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,283 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=158,c=8192,r=158]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,283 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,283 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,283 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,283 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,283 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,283 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,283 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting 
ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,283 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,283 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,284 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,284 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,283 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,284 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,284 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,284 DEBUG || HEADER:Host: 
naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,284 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,284 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,284 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,284 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,284 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,284 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,284 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,284 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks/0/status HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,284 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,284 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,284 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,284 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,284 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,284 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,284 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,284 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,284 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,284 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,284 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 
13:56:30,284 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,284 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,284 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,284 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,284 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,284 DEBUG || scope null||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,284 DEBUG || context=||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,284 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,284 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,284 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,284 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,285 DEBUG || write(array HeapByteBuffer@20ea9b1c[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,285 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,285 DEBUG || sendResponse info=null content=HeapByteBuffer@5be9fc5f[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,286 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,286 DEBUG || generate: NEED_HEADER for 
SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4bfc07d1] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,286 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@5be9fc5f[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,286 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,286 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,286 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4bfc07d1] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,286 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=141,c=8192,r=141]={<<>>\ncation/j...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@5be9fc5f[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,286 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,286 DEBUG || flushed 198 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,286 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,286 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,286 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4bfc07d1] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,286 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:30,286 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,286 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,286 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true 
al=0},r=13,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,286 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,286 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,286 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,286 DEBUG || complete(org.eclipse.jetty.util.Callback$3@50581ef3) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,286 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,286 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,286 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,286 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,286 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,286 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_sink/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,286 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,286 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,286 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,286 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,286 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,286 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,286 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,286 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,286 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,286 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,286 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,286 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,286 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,286 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,287 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,287 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,287 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,287 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,287 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,287 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute 
EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=23]@2023-08-25T13:56:30.28719+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,287 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=23]@2023-08-25T13:56:30.287256+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,287 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,287 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=23]@2023-08-25T13:56:30.287288+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,287 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,287 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,287 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:56:30.287387+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:30,287 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,287 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,287 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,287 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || 
HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,287 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,287 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || Key interests updated 0 -> 1 on 
SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,287 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,287 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,287 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/topics on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} GET //naument:8083/connectors/vk_nau27_sink/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,287 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,287 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/topics HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,288 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,288 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,288 DEBUG || REQUEST GET /connectors/vk_nau27_sink/topics on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,288 DEBUG || scope null||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,288 DEBUG || context=||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:30,288 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,288 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,288 DEBUG || servlet ||/connectors/vk_nau27_sink/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,288 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:30,288 DEBUG || write(array HeapByteBuffer@131ebac3[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,288 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=75 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,288 DEBUG || sendResponse info=null content=HeapByteBuffer@6be3cc49[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,288 DEBUG || COMMIT for /connectors/vk_nau27_sink/topics on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,288 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2ae486f7] (null,[p=0,l=75,c=8192,r=75],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,288 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=75} last=true 
content=HeapByteBuffer@6be3cc49[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,288 DEBUG || Date: Fri, 25 Aug 2023 10:56:30 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,288 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:30,288 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2ae486f7] ([p=0,l=141,c=8192,r=141],[p=0,l=75,c=8192,r=75],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,288 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>\n\non\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@6be3cc49[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,288 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,289 DEBUG || flushed 216 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,289 DEBUG || Flushed=true written=216 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,289 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:30,289 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2ae486f7] ([p=141,l=141,c=8192,r=0],[p=75,l=75,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,289 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,289 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:30,289 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=13,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:30,289 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,289 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,289 DEBUG || action COMPLETE 
HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,289 DEBUG || complete(org.eclipse.jetty.util.Callback$3@5644789a) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:30,289 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,289 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,289 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,289 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,289 DEBUG || onCompleted for /connectors/vk_nau27_sink/topics written=75 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,289 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:30 +0000] "GET /connectors/vk_nau27_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:30,289 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,289 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,289 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,289 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,289 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,289 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 
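
The exchange above is Kafka Connect's active-topics endpoint, GET /connectors/{name}/topics, answering with the 75-byte JSON body {"vk_nau27_sink":{"topics":[...]}} recorded in the access-log line. A minimal sketch of issuing the same request from a standalone client follows; the worker address naument:8083 and the connector name vk_nau27_sink are taken from the log, while the class name, plain HTTP and the absence of authentication are assumptions about this deployment.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Hypothetical helper class: queries the Connect REST API the same way the
// ReactorNetty client seen in the access log does.
public class ActiveTopicsCheck {
    public static void main(String[] args) throws Exception {
        String worker = "http://naument:8083";   // worker host/port seen in the log
        String connector = "vk_nau27_sink";      // connector name seen in the log

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(worker + "/connectors/" + connector + "/topics"))
                .header("Accept", "application/json")
                .GET()
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        // Expected shape, per the response logged above:
        // {"vk_nau27_sink":{"topics":["...BL_SERVICECALL_NC27"]}}
        System.out.println(response.statusCode() + " " + response.body());
    }
}

Any HTTP client works here; the periodic GETs recorded in the access-log lines (user agent ReactorNetty/1.1.6) are simply an external client polling the same REST resources.
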
2023-08-25 13:56:30,289 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,289 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,289 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,289 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:30,289 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,289 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,289 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:30,289 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,289 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,289 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:30,289 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,289 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,289 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,289 DEBUG || 
HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,289 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:30,289 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,289 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,289 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:30,289 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:30,289 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,289 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,289 DEBUG || Wakeup on submit 
ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,290 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,290 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:30,290 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,290 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,290 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,290 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,290 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,290 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:30,290 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:30,290 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,290 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:30,291 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1895, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,291 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,291 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because 
previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=629) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,291 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419085 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,291 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1898, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=629, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,291 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,291 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419085 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,295 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:30,296 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=637, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,296 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,296 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,296 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,296 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=631) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,296 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,296 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,296 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147418057 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,296 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=638, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=631, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,296 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,296 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147418057 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,297 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:30,299 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:30,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1896, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
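
The io.debezium.jdbc.JdbcConnection TRACE lines above show the Oracle connector sampling its own session memory from v$statname/v$mystat and then summarising it in the "Oracle Session UGA ... PGA ..." DEBUG line. The sketch below runs the same probe as a standalone JDBC program (note that V$MYSTAT reports the probe session's own statistics, not the connector's); the JDBC URL and credentials are placeholders, the Oracle driver (ojdbc) is assumed to be on the classpath, and the two UGA statistic names are inferred from the summary line rather than quoted from the log.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

// Hypothetical standalone probe: reuses the query pattern quoted in the
// JdbcConnection TRACE entries above, parameterised over the statistic name.
public class SessionMemoryProbe {
    private static final String[] STATS = {
            "session uga memory", "session uga memory max",  // inferred from the UGA summary
            "session pga memory", "session pga memory max"   // quoted verbatim in the log
    };

    public static void main(String[] args) throws Exception {
        // Placeholder connection details; the user needs SELECT on V$STATNAME and V$MYSTAT.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:oracle:thin:@//oracle-host:1521/ORCLPDB1", "c##dbzuser", "dbz")) {
            String sql = "SELECT VALUE FROM v$statname n, v$mystat m "
                       + "WHERE n.name=? AND n.statistic#=m.statistic#";
            try (PreparedStatement ps = conn.prepareStatement(sql)) {
                for (String stat : STATS) {
                    ps.setString(1, stat);
                    try (ResultSet rs = ps.executeQuery()) {
                        if (rs.next()) {
                            long bytes = rs.getLong(1);
                            System.out.printf("%-24s %,13d bytes (%.2f MB)%n",
                                    stat, bytes, bytes / 1048576.0);
                        }
                    }
                }
            }
        }
    }
}

The connector already aggregates these figures itself (here roughly 77 MB of PGA against a 138 MB high-water mark); to observe the connector's session from the outside, the same statistics can instead be read from V$SESSTAT joined to V$SESSION, since V$MYSTAT only covers the current session.
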
2023-08-25 13:56:30,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=630) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419057 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1899, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=630, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,320 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,320 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,320 TRACE || [Consumer
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,320 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,320 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147419057 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1887, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, 
groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=626) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384905 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,347 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1890, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=626, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,347 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384905 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1888, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,352 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=626) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384900 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1891, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=626, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384899 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[... the ChangeEventQueue DEBUG pair above ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") repeats verbatim dozens of times, all with timestamp 2023-08-25 13:56:30,695 ...]
2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,695 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,696 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,696 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:30,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=115, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,696 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:30,696 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,696 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:30,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=83) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 987 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=116, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=83, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 987 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1897, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=628) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418668 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1900, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=628, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
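[editor's aside] The naument--offsets and naument--statuses clients above are the Connect worker's internal consumers for its offset and status topics going through the ordinary KafkaConsumer poll/fetch cycle: each poll() builds an incremental fetch request per broker, and partitions whose broker already has an in-flight request are skipped until that response arrives. The following is a minimal, illustrative sketch (not taken from this deployment; the bootstrap servers, group id, topic name and deserializers are assumptions) of a plain consumer loop that exercises the same client internals and therefore emits the same AbstractFetch / FetchSessionHandler / KafkaConsumer TRACE and DEBUG messages when those loggers are enabled.

// Illustrative sketch only: a plain KafkaConsumer poll loop tailing the Connect status topic.
// Connection details below are assumptions, not values confirmed by this log.
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class StatusTopicTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092"); // assumption
        props.put("group.id", "log-inspection");                                      // assumption
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("isolation.level", "read_uncommitted"); // matches the READ_UNCOMMITTED fetches in the log

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status")); // internal Connect status topic seen above
            while (true) {
                // Each poll() drives the fetch-session bookkeeping logged by AbstractFetch and
                // FetchSessionHandler: build incremental fetches, skip nodes with pending requests.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("%s-%d@%d%n", record.topic(), record.partition(), record.offset());
                }
            }
        }
    }
}

[end of aside; the log continues below]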
2023-08-25 13:56:30,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418668 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,711 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:30,711 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:30,711 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=120, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=120, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1889, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], 
currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=627) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,760 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384492 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,760 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1892, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=627, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,761 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384491 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,792 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1898, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,792 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,792 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=630) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,792 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418584 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,792 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1901, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=630, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,792 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,792 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418584 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,797 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=638, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,797 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,797 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,797 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,797 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=632) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,797 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,797 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,797 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147417556 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,797 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=639, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=632, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,797 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,797 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147417556 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1899, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=631) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418555 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1902, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=631, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418555 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1890, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping 
fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, 
offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=627) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384404 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,848 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1893, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=627, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,848 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384404 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1891, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=627) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384399 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:30,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1894, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=627, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:30,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147384399 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=116, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,196 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,196 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
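Interleaved with the Debezium queue polling, the sink task's consumer (clientId connector-consumer-vk_nau27_sink-0, group connect-vk_nau27_sink) keeps receiving empty incremental FETCH responses (responses=[]) for vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 and immediately issues the next fetch. The FetchRequestData fields visible below (maxWaitMs=500, minBytes=1, maxBytes=52428800, READ_UNCOMMITTED) correspond to the client settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. A standalone consumer with the same settings would look roughly like this sketch; the real consumer is created internally by the Connect worker for the sink task, so this is only an illustration.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Standalone consumer using the fetch settings seen in the FetchRequestData entries.
public class SinkFetchSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "connect-vk_nau27_sink");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");            // maxWaitMs in the request
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                // minBytes
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");         // maxBytes
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
            while (true) {
                // An empty FetchResponse (responses=[]) simply yields zero records here.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                System.out.println("fetched " + records.count() + " records");
            }
        }
    }
}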
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,197 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:31,197 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,197 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:31,197 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=84) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,197 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,197 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:31,197 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,197 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,197 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 487 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,197 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=117, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=84, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,197 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,197 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 486 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,207 DEBUG || [Consumer 
clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1900, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,207 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=629) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418169 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,208 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1903, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=629, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,208 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418168 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1892, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=628) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383991 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,261 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1895, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=628, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,261 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383991 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,293 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1901, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
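The FetchPosition{offset=..., offsetEpoch=..., currentLeader=...} entries above show where the worker's naument--offsets consumer currently stands on each naument-offset partition (for example offset 25 on naument-offset-13 and offset 122 on naument-offset-18). To check those positions independently, a small read-only program can list the partitions of the offsets topic and print their end offsets, which is where a fully caught-up reader such as this internal consumer sits. The group id used here is hypothetical and does not interfere with the worker.

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

// Prints the end offset of every naument-offset partition (read-only inspection).
public class OffsetTopicPositions {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-inspect"); // hypothetical group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            List<TopicPartition> parts = new ArrayList<>();
            for (PartitionInfo pi : consumer.partitionsFor("naument-offset")) {
                parts.add(new TopicPartition(pi.topic(), pi.partition()));
            }
            consumer.assign(parts);
            consumer.seekToEnd(parts);
            for (TopicPartition tp : parts) {
                // position() is the next offset to fetch, matching FetchPosition{offset=...}
                System.out.printf("%s end offset=%d%n", tp, consumer.position(tp));
            }
        }
    }
}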
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,293 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,293 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,293 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,293 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,293 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,293 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,293 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,293 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=631) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,293 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,293 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,293 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418083 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,293 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1904, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=631, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,294 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,294 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,294 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,294 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,294 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,294 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418082 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,297 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=639, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,297 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,297 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,297 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,297 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=633) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,297 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,297 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,297 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147417056 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,297 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=640, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=633, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,298 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,298 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147417055 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1902, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
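Three internal consumers appear throughout this log: naument--offsets (topic naument-offset, partitions 0-24), naument--statuses (naument-status, partitions 0-4) and naument--configs (naument-config-0). These appear to be the Kafka Connect worker's own readers for its offset, status and config storage topics, all under group.id naument. The distributed-worker settings implied by those names would look roughly as follows; this sketch is inferred from the topic and partition names in the log, not copied from the actual worker configuration.

# connect-distributed.properties (inferred sketch)
group.id=naument
bootstrap.servers=broker1:29092,broker2:29092,broker3:29092
offset.storage.topic=naument-offset
offset.storage.partitions=25
config.storage.topic=naument-config
status.storage.topic=naument-status
status.storage.partitions=5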
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,322 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,322 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,322 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=632) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,322 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,322 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,322 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418055 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,322 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1905, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=632, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,322 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,322 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,322 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,322 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,322 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,322 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147418054 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,348 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1893, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=628) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383904 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1896, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=628, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383903 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1894, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=628) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383898 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1897, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=628, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383898 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,495 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,495 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,495 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1212/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,495 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1212/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,495 
DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1212/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,496 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:56:31.49605+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,496 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:56:31.496148+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1212/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,496 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:56:31.496208+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,496 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1212/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,496 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:56:31.496312+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,496 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,496 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,496 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1212/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false 
se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,496 DEBUG || filled 129 HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,496 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1212/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,496 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,496 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} filled 129 HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,496 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,496 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || HEADER:Accept: 
application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,496 DEBUG || REQUEST for //naument:8083/connectors on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} GET //naument:8083/connectors HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,496 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,496 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,496 DEBUG || handle //naument:8083/connectors HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,496 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,496 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,496 DEBUG || REQUEST GET /connectors on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,497 DEBUG || scope null||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,497 DEBUG || context=||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,497 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,497 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,497 DEBUG || servlet 
||/connectors -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,497 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,498 DEBUG || write(array HeapByteBuffer@b6dbe6[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,498 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=32 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,498 DEBUG || sendResponse info=null content=HeapByteBuffer@1312cc49[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,498 DEBUG || COMMIT for /connectors on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,498 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@38a3e5f4] (null,[p=0,l=32,c=8192,r=32],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,498 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=32} last=true content=HeapByteBuffer@1312cc49[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,498 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,498 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,498 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@38a3e5f4] ([p=0,l=141,c=8192,r=141],[p=0,l=32,c=8192,r=32],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,498 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>lication/...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@1312cc49[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,498 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,498 DEBUG || flushed 173 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 
13:56:31,498 DEBUG || Flushed=true written=173 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,498 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,498 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@38a3e5f4] ([p=141,l=141,c=8192,r=0],[p=32,l=32,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,498 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,498 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,498 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=14,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,498 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,498 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,499 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,499 DEBUG || complete(org.eclipse.jetty.util.Callback$3@4f45b624) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,499 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,499 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,499 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,499 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,499 DEBUG || onCompleted for /connectors written=32 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,499 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors HTTP/1.1" 200 32 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,499 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,499 DEBUG || filled 0 
HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,499 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,499 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,499 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,499 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,499 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,499 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,499 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,499 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,499 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,499 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,499 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE 
rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,499 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,499 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,499 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,499 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,499 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,499 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,499 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,499 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,499 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,499 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,499 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,499 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,499 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,499 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,499 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,499 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,499 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,499 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,499 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,499 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,499 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,500 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1212/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false 
i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,499 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,500 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1212/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,500 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,500 DEBUG || ReservedThread@2da5db67{RUNNING,thread=Thread[qtp974320615-176,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,500 DEBUG || ReservedThread@2da5db67{RESERVED,thread=Thread[qtp974320615-176,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,500 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1212/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,500 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,500 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,500 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,500 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,500 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=1,epc=40]@2023-08-25T13:56:31.500164+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,500 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,500 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=40]@2023-08-25T13:56:31.500257+03:00 m=EXECUTE_PRODUCE_CONSUME 
t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1212/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,500 DEBUG || ReservedThread@2da5db67{RESERVED,thread=Thread[qtp974320615-176,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=40]@2023-08-25T13:56:31.500303+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,500 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1212/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,500 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1210/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,500 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1213/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,500 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=41]@2023-08-25T13:56:31.500375+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,500 DEBUG || filled 142 HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,500 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1210/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,500 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} filled 142 HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,500 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,500 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,500 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,500 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1210/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,500 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,500 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,500 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute 
EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:56:31.500638+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,500 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,500 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} GET //naument:8083/connectors/vk_nau27_src HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,500 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:56:31.500774+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1210/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,500 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,500 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:56:31.500933+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} 
[org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,500 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,500 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1211/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,501 DEBUG || handle //naument:8083/connectors/vk_nau27_src HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,501 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:56:31.500989+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,501 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,501 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,501 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,501 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1211/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,501 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,501 DEBUG || REQUEST GET /connectors/vk_nau27_src on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,501 DEBUG || filled 143 HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,501 
DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,501 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,501 DEBUG || scope null||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,501 DEBUG || context=||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,501 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,501 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} filled 143 HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,501 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,501 DEBUG || servlet ||/connectors/vk_nau27_src -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,501 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,501 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,501 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || 
HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,501 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} GET //naument:8083/connectors/vk_nau27_sink HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,501 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,501 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,501 DEBUG || handle //naument:8083/connectors/vk_nau27_sink HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,501 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,501 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,501 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,501 DEBUG || [Worker 
clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:31,501 DEBUG || REQUEST GET /connectors/vk_nau27_sink on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,502 DEBUG || scope null||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,502 DEBUG || context=||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,502 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,502 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,502 DEBUG || servlet ||/connectors/vk_nau27_sink -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,502 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,502 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,503 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:31,503 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,503 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,503 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,504 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960991503 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,504 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,504 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 740405ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,504 DEBUG || write(array 
HeapByteBuffer@319f4b16[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,504 DEBUG || write(array HeapByteBuffer@41a3b649[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,504 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1528 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,504 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=905 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,504 DEBUG || sendResponse info=null content=HeapByteBuffer@3a3bf0a8[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,504 DEBUG || sendResponse info=null content=HeapByteBuffer@226c255f[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,504 DEBUG || COMMIT for /connectors/vk_nau27_src on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=4} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,504 DEBUG || COMMIT for /connectors/vk_nau27_sink on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,504 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1d4492af] (null,[p=0,l=1528,c=8192,r=1528],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,504 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@62754c03] (null,[p=0,l=905,c=8192,r=905],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,504 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1528} last=true content=HeapByteBuffer@3a3bf0a8[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,504 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=905} last=true content=HeapByteBuffer@226c255f[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 
13:56:31,504 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,504 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,504 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,504 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,504 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@62754c03] ([p=0,l=142,c=8192,r=142],[p=0,l=905,c=8192,r=905],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,504 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1d4492af] ([p=0,l=143,c=8192,r=143],[p=0,l=1528,c=8192,r=1528],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,504 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@226c255f[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,504 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\njso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@3a3bf0a8[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,504 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,504 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,505 DEBUG || flushed 1047 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,505 DEBUG || flushed 1671 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,505 DEBUG || Flushed=true written=1671 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,505 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,505 DEBUG || Flushed=true written=1047 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 
13:56:31,505 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,505 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1d4492af] ([p=143,l=143,c=8192,r=0],[p=1528,l=1528,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,505 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@62754c03] ([p=142,l=142,c=8192,r=0],[p=905,l=905,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,505 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,505 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,505 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,505 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,505 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=14,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,505 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=14,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,505 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,505 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,505 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,505 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,505 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,505 DEBUG || complete(org.eclipse.jetty.util.Callback$3@208e03f4) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,505 DEBUG || completed 
HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,505 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,505 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,505 DEBUG || complete(org.eclipse.jetty.util.Callback$3@30427ef4) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,505 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,505 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,505 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,505 DEBUG || onCompleted for /connectors/vk_nau27_src written=1528 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,505 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,505 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,505 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,505 DEBUG || onCompleted for /connectors/vk_nau27_sink written=905 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,505 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_src HTTP/1.1" 200 1528 "-" "ReactorNetty/1.1.6" 5 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,505 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,505 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,505 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_sink HTTP/1.1" 200 905 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,505 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE 
rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,505 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,505 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,505 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,505 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,505 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,505 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,505 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,505 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,505 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,506 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=5} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=6} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,506 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,506 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,506 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,506 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=6} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=6/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,506 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,506 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,506 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,506 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,506 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,506 DEBUG || END --> START 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,506 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,506 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,506 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,506 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,506 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,506 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,506 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:56:31.506318+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=5} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=5} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,506 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,506 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:56:31.506408+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:56:31.506473+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,506 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || fillable 
FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,506 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=27]@2023-08-25T13:56:31.506632+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,506 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,506 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,506 DEBUG || filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,506 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,506 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,506 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || filled 149 HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,506 DEBUG || interested 
FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,506 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,506 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,506 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,506 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 
13:56:31,506 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,507 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,507 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,507 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,507 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,507 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,507 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,507 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,507 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,507 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,507 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,507 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,507 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,507 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,507 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} GET //naument:8083/connectors/vk_nau27_src/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,507 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/status on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,507 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,507 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,507 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,507 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/status HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,507 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,507 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,507 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,507 DEBUG || handle //naument:8083/connectors/vk_nau27_src/status HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,507 DEBUG || REQUEST GET /connectors/vk_nau27_sink/status on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,507 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING 
os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,507 DEBUG || scope null||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,507 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,507 DEBUG || context=||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,507 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,507 DEBUG || REQUEST GET /connectors/vk_nau27_src/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,507 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,507 DEBUG || scope null||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,508 DEBUG || servlet ||/connectors/vk_nau27_sink/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,508 DEBUG || context=||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,508 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,508 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,508 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,508 DEBUG || servlet ||/connectors/vk_nau27_src/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,508 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,509 DEBUG || write(array HeapByteBuffer@6fb8bbe[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,509 DEBUG || write(array HeapByteBuffer@54e45032[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,509 DEBUG || write(array) 
s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=169 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,509 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=170 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,509 DEBUG || sendResponse info=null content=HeapByteBuffer@2080b49a[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,509 DEBUG || sendResponse info=null content=HeapByteBuffer@2b9b0b27[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,509 DEBUG || COMMIT for /connectors/vk_nau27_sink/status on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,509 DEBUG || COMMIT for /connectors/vk_nau27_src/status on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,509 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4ab276c3] (null,[p=0,l=169,c=8192,r=169],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,509 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=169} last=true content=HeapByteBuffer@2080b49a[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,509 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,509 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@76be538f] (null,[p=0,l=170,c=8192,r=170],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,509 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,509 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=170} last=true content=HeapByteBuffer@2b9b0b27[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,509 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,509 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,509 DEBUG 
|| generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4ab276c3] ([p=0,l=142,c=8192,r=142],[p=0,l=169,c=8192,r=169],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,509 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@76be538f] ([p=0,l=142,c=8192,r=142],[p=0,l=170,c=8192,r=170],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,509 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@2080b49a[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,509 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@2b9b0b27[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,509 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,509 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,510 DEBUG || flushed 311 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,510 DEBUG || Flushed=true written=311 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,510 DEBUG || flushed 312 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,510 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,510 DEBUG || Flushed=true written=312 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,510 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4ab276c3] ([p=142,l=142,c=8192,r=0],[p=169,l=169,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,510 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,510 DEBUG || onWriteComplete(true,null) 
s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,510 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@76be538f] ([p=142,l=142,c=8192,r=0],[p=170,l=170,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,510 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,510 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,510 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=15,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,510 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,510 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,510 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,510 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,510 DEBUG || complete(org.eclipse.jetty.util.Callback$3@2c0feae5) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,510 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=15,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,510 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,510 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,510 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,510 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,510 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED 
is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,510 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,510 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,510 DEBUG || onCompleted for /connectors/vk_nau27_sink/status written=169 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,510 DEBUG || complete(org.eclipse.jetty.util.Callback$3@3b23df34) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,510 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,510 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,510 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_sink/status HTTP/1.1" 200 169 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,510 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,510 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,510 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,510 DEBUG || onCompleted for /connectors/vk_nau27_src/status written=170 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,510 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,510 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,510 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,510 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,510 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,510 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,510 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,510 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,510 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,510 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,510 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,510 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,510 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,510 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,510 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,510 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 
13:56:31,510 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,510 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,511 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,511 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,511 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,511 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=14,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,511 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,511 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,511 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,511 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,511 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,511 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=41]@2023-08-25T13:56:31.511183+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,511 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || parseNext s=START 
HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=41]@2023-08-25T13:56:31.511284+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,511 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=41]@2023-08-25T13:56:31.511317+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,511 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,511 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,511 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=42]@2023-08-25T13:56:31.511448+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,511 DEBUG || reset 
HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,511 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,511 DEBUG || filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,511 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,511 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,511 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,511 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 
2023-08-25 13:56:31,511 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,511 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,511 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,511 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,511 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,511 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,511 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,511 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,511 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,512 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,511 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,512 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,512 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,512 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} GET //naument:8083/connectors/vk_nau27_sink/config 
HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,512 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,512 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,512 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,512 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,512 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,512 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/config HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,512 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,512 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,512 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,512 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,512 DEBUG || REQUEST GET /connectors/vk_nau27_sink/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,512 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,512 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || scope null||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,512 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,512 DEBUG || context=||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,512 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,512 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || Selector 
sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,512 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,512 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,512 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,512 DEBUG || servlet ||/connectors/vk_nau27_sink/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,512 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,512 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,512 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,512 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,512 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute 
EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=27]@2023-08-25T13:56:31.512834+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,512 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=27]@2023-08-25T13:56:31.5129+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,512 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=27]@2023-08-25T13:56:31.512938+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,512 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,513 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:56:31.513009+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,513 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,513 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,513 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,513 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,513 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,513 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request 
vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,513 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:31,512 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,513 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:31,513 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,513 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,513 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,513 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,513 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,513 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,513 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,513 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,513 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,513 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,513 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,513 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,513 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,513 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,513 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/config on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} GET //naument:8083/connectors/vk_nau27_src/config HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,513 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,513 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key 
rotation at: 1692961731909 (now: 1692960991513 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,513 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,513 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,513 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 740396ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,513 DEBUG || handle //naument:8083/connectors/vk_nau27_src/config HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,513 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,514 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,514 DEBUG || REQUEST GET /connectors/vk_nau27_src/config on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,514 DEBUG || scope null||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,514 DEBUG || context=||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,514 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,514 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,514 DEBUG || servlet ||/connectors/vk_nau27_src/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,514 DEBUG || chain=null 
[org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,514 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,514 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,514 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:31,514 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:31,514 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,514 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,514 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,514 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960991514 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,514 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,514 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 740395ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,515 DEBUG || write(array HeapByteBuffer@6c6286a6[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,515 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1431 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,515 DEBUG || sendResponse info=null content=HeapByteBuffer@6b4eecb6[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,515 DEBUG || COMMIT for /connectors/vk_nau27_src/config on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,515 DEBUG || generate: NEED_HEADER for 
SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@45b4027f] (null,[p=0,l=1431,c=8192,r=1431],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,515 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1431} last=true content=HeapByteBuffer@6b4eecb6[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,515 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,515 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,515 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@45b4027f] ([p=0,l=143,c=8192,r=143],[p=0,l=1431,c=8192,r=1431],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,515 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=143,c=8192,r=143]={<<>>cation/js...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@6b4eecb6[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,515 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,515 DEBUG || flushed 1574 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,515 DEBUG || Flushed=true written=1574 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,515 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,515 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@45b4027f] ([p=143,l=143,c=8192,r=0],[p=1431,l=1431,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,515 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,515 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,515 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=16,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,515 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false 
i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,515 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,515 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,515 DEBUG || complete(org.eclipse.jetty.util.Callback$3@15e299e6) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,515 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,515 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,515 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,515 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,515 DEBUG || onCompleted for /connectors/vk_nau27_src/config written=1431 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,516 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_src/config HTTP/1.1" 200 1431 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,516 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,516 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,516 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,516 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,516 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,516 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,516 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,516 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,516 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,516 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,516 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,516 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,516 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,516 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,516 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,516 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,516 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,516 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,516 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,516 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,516 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,516 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,516 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,516 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,516 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,516 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,516 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,516 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,516 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,516 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,516 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,516 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,516 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,516 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,516 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,516 DEBUG || write(array HeapByteBuffer@791a2dc1[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,516 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,516 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,516 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,517 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE 
awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,517 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,517 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:56:31.517075+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,517 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:56:31.517133+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,517 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:56:31.517174+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,517 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,517 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:56:31.517226+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,517 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,517 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,517 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,517 DEBUG || filled 148 HeapByteBuffer@1d18b05f[p=0,l=148,c=8192,r=148]={<<>>n/json\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,517 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,517 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,517 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} filled 148 HeapByteBuffer@1d18b05f[p=0,l=148,c=8192,r=148]={<<>>n/json\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,517 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=148,c=8192,r=148]={<<>>n/json\r\n\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,517 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=148,c=8192,r=148]={<<>>n/json\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 
DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,517 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,517 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,517 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,517 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,517 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,517 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,517 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,517 DEBUG || scope null||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,517 DEBUG || context=||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,517 DEBUG || Entering scope 
org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,517 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,517 DEBUG || servlet ||/connectors/vk_nau27_src/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,517 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,518 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,518 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:31,516 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,518 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:31,516 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,518 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,516 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=808 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,518 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,518 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,518 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,518 DEBUG || sendResponse info=null content=HeapByteBuffer@7c6fc62f[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,518 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,518 DEBUG || COMMIT for /connectors/vk_nau27_sink/config on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true 
al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=6} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,518 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5bc41013] (null,[p=0,l=808,c=8192,r=808],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,518 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=808} last=true content=HeapByteBuffer@7c6fc62f[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,518 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,518 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960991518 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,518 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,518 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 740391ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,518 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,518 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5bc41013] ([p=0,l=142,c=8192,r=142],[p=0,l=808,c=8192,r=808],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,518 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@7c6fc62f[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,518 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,518 DEBUG || flushed 950 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=7/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=6} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,518 DEBUG || Flushed=true written=950 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,518 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,518 DEBUG || generate: 
DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5bc41013] ([p=142,l=142,c=8192,r=0],[p=808,l=808,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,518 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,518 DEBUG || write(array HeapByteBuffer@249e4d9f[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,518 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,518 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1551 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,518 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=15,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=6} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,518 DEBUG || sendResponse info=null content=HeapByteBuffer@30014657[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || complete(org.eclipse.jetty.util.Callback$3@39d269d9) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,519 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1b681d09] (null,[p=0,l=1551,c=8192,r=1551],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,519 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} 
[org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1551} last=true content=HeapByteBuffer@30014657[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,519 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,519 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,519 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1b681d09] ([p=0,l=143,c=8192,r=143],[p=0,l=1551,c=8192,r=1551],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,519 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || onCompleted for /connectors/vk_nau27_sink/config written=808 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\n\nso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@30014657[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,519 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,519 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_sink/config HTTP/1.1" 200 808 "-" "ReactorNetty/1.1.6" 7 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,519 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,519 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,519 DEBUG || flushed 1694 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,519 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 
of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=7} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,519 DEBUG || Flushed=true written=1694 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,519 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,519 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=7} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,519 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,519 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,519 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1b681d09] ([p=143,l=143,c=8192,r=0],[p=1551,l=1551,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,519 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,519 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,519 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,519 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,519 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,519 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of 
-1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=7} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,519 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=16,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,519 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,519 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=15,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=7} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,519 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,519 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,519 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,519 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 
2023-08-25 13:56:31,519 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || complete(org.eclipse.jetty.util.Callback$3@bd73711) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,519 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,519 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,519 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,519 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,519 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,519 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:56:31.519832+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,519 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks written=1551 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,519 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,519 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,519 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_src/tasks HTTP/1.1" 200 1551 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,519 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:56:31.519904+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,519 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,519 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,520 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:56:31.51998+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:56:31.520031+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,520 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,520 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,520 DEBUG || Key interests updated 1 -> 0 on 
SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,520 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,519 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,520 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,520 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false 
se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,520 DEBUG || filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>/json\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,520 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>/json\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>/json\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,520 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,520 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,520 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>/json\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,520 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,520 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 
2023-08-25 13:56:31,520 DEBUG || ReservedThread@2da5db67{RUNNING,thread=Thread[qtp974320615-176,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,520 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,520 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,520 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,520 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,520 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || ReservedThread@2da5db67{RESERVED,thread=Thread[qtp974320615-176,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,520 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,520 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks on 
HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,520 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,520 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,520 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,520 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,520 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,520 DEBUG || 
fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,520 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,521 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,521 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,521 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,521 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,521 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,521 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,521 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,521 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,521 DEBUG || scope null||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,521 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,521 DEBUG || 
context=||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,521 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,521 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,521 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,521 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,521 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,521 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,521 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,521 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,521 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,521 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,521 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE 
awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,521 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=42]@2023-08-25T13:56:31.521603+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,521 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,521 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=42]@2023-08-25T13:56:31.521709+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,521 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=42]@2023-08-25T13:56:31.521749+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,521 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:31,521 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=43]@2023-08-25T13:56:31.521831+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,521 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,521 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,521 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,521 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE 
rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,521 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,521 DEBUG || filled 157 HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,521 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,522 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} filled 157 HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,522 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=15,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,522 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 
DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,522 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,522 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,522 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,522 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks/0/status HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,522 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,522 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,522 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,522 DEBUG || scope null||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,522 DEBUG || context=||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,522 DEBUG || Entering scope 
org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,522 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,522 DEBUG || servlet ||/connectors/vk_nau27_src/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,522 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,523 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,523 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,523 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,523 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,523 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,523 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,523 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,523 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,523 DEBUG || write(array HeapByteBuffer@6b023b08[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,523 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,523 DEBUG || sendResponse info=null content=HeapByteBuffer@ed93870[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,523 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,523 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@373102ee] 
(null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,523 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@ed93870[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,523 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,524 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,524 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@373102ee] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,524 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@ed93870[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,524 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,524 DEBUG || flushed 198 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,524 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,524 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,524 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@373102ee] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,524 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,524 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,524 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=16,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,524 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,524 DEBUG || nextAction(false) COMPLETE 
HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,524 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,524 DEBUG || complete(org.eclipse.jetty.util.Callback$3@67f0365f) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,524 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,524 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,524 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,524 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,524 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,524 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,524 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,524 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,524 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,524 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,524 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,524 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED 
is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,524 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,524 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:56:31,524 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,524 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,525 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,525 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,525 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,525 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:56:31.525028+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,525 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 
TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,525 DEBUG || ReservedThread@2da5db67{RESERVED,thread=Thread[qtp974320615-176,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:56:31.525178+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,525 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:56:31.525226+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,525 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,525 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,525 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692960991525 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,525 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,525 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 740384ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:56:31,525 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,525 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,525 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,525 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:56:31.525148+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,525 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,525 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,525 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,525 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,525 DEBUG || write(array HeapByteBuffer@4d306da2[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,525 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,525 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=16,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,525 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=929 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,525 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,525 DEBUG || sendResponse info=null content=HeapByteBuffer@3e818510[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,525 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,525 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,525 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=5} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,525 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3aa79b64] (null,[p=0,l=929,c=8192,r=929],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,525 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=929} last=true content=HeapByteBuffer@3e818510[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,525 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,525 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,525 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,525 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,525 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,525 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,525 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3aa79b64] ([p=0,l=142,c=8192,r=142],[p=0,l=929,c=8192,r=929],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,525 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,526 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,526 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,526 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,526 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@3e818510[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,526 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,526 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,526 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,526 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,526 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,526 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,526 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,526 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/topics on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} GET //naument:8083/connectors/vk_nau27_src/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,526 DEBUG || flushed 1071 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=6} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,526 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,526 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,526 DEBUG || Flushed=true written=1071 remaining=0 WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,526 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,526 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,526 DEBUG || update 
WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,526 DEBUG || handle //naument:8083/connectors/vk_nau27_src/topics HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,526 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,526 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3aa79b64] ([p=142,l=142,c=8192,r=0],[p=929,l=929,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,526 DEBUG || handling HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,526 DEBUG || action DISPATCH HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,526 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,526 DEBUG || REQUEST GET /connectors/vk_nau27_src/topics on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,526 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,526 DEBUG || scope null||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,526 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,526 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,526 DEBUG || context=||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,526 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=17,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=6} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,526 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,526 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,526 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,526 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,526 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,526 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,526 DEBUG || servlet ||/connectors/vk_nau27_src/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,526 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,526 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,526 DEBUG || complete(org.eclipse.jetty.util.Callback$3@1fd8297b) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,526 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,526 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,526 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} 
[org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,526 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,526 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,526 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,526 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,526 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,526 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,526 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,526 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,526 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks written=929 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,526 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_sink/tasks HTTP/1.1" 200 929 "-" "ReactorNetty/1.1.6" 6 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,526 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,526 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,527 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=7} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=7} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || parseNext 
s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,527 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,527 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,527 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,527 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=7} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,527 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,527 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=7} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=7} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,527 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,527 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,527 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,527 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,527 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,527 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || write(array HeapByteBuffer@767322a1[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,526 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,527 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=85 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,527 DEBUG || sendResponse info=null content=HeapByteBuffer@53726c91[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@58f53728{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,527 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,527 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,527 DEBUG || COMMIT for /connectors/vk_nau27_src/topics on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,527 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || generate: NEED_HEADER for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@a2f0c52] (null,[p=0,l=85,c=8192,r=85],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,527 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=85} last=true content=HeapByteBuffer@53726c91[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,527 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,527 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,527 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,527 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50662], selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,527 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,527 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,527 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,527 DEBUG || generate: FLUSH for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@a2f0c52] ([p=0,l=141,c=8192,r=141],[p=0,l=85,c=8192,r=85],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,527 DEBUG || write: WriteFlusher@10ce0452{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@53726c91[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,527 DEBUG || update WriteFlusher@10ce0452{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,527 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=1,epc=43]@2023-08-25T13:56:31.527707+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,527 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || flushed 226 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=COMPLETING}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,527 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=43]@2023-08-25T13:56:31.527764+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,527 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=43]@2023-08-25T13:56:31.527816+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,527 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,527 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,527 DEBUG || Flushed=true written=226 remaining=0 WriteFlusher@10ce0452{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,527 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=1,epc=44]@2023-08-25T13:56:31.527918+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,527 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,527 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,527 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,527 DEBUG || update WriteFlusher@10ce0452{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,527 DEBUG || filled 158 HeapByteBuffer@1d18b05f[p=0,l=158,c=8192,r=158]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,527 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,528 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,528 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} filled 158 HeapByteBuffer@1d18b05f[p=0,l=158,c=8192,r=158]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,528 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 
keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,528 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,528 DEBUG || generate: DONE for SendCallback@3769f5c[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@a2f0c52] ([p=141,l=141,c=8192,r=0],[p=85,l=85,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,528 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,528 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=16,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=158,c=8192,r=158]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,528 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=158,c=8192,r=158]={<<>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,528 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,528 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,528 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,528 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || 
HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,528 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,528 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,528 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,528 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=17,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,528 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,528 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,528 DEBUG || nextAction(false) COMPLETE HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,528 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,528 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,528 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,528 DEBUG || 
ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,528 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,528 DEBUG || action COMPLETE HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,528 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,528 DEBUG || complete(org.eclipse.jetty.util.Callback$3@59a49730) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,528 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,528 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,528 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,528 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks/0/status HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,528 DEBUG || completed HttpChannelState@71491617{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,528 DEBUG || handling HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,528 DEBUG || unhandle HttpChannelState@71491617{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,528 DEBUG || action DISPATCH HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,528 DEBUG || nextAction(false) TERMINATED HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,528 
DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,528 DEBUG || action TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,528 DEBUG || onCompleted for /connectors/vk_nau27_src/topics written=85 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,528 DEBUG || scope null||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,528 DEBUG || context=||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,528 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,528 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,528 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,528 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,528 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,529 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,529 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,529 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,529 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,529 DEBUG || onContentComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,529 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,529 DEBUG || onRequestComplete HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,529 DEBUG || HttpInputOverHTTP@5648c209[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,529 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@5b51688{s=END}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || recycle HttpChannelState@71491617{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,529 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,529 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,529 DEBUG || !handle TERMINATED HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,529 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,529 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,529 DEBUG || 
HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,529 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,529 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,529 DEBUG || changeInterests p=false 0->1 for 
SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,529 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,529 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,529 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,529 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,529 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,529 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,529 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,529 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,529 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,529 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,529 DEBUG || write(array HeapByteBuffer@5df37141[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,529 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,529 DEBUG || sendResponse info=null 
content=HeapByteBuffer@677aa756[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@444625b7{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,529 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,529 DEBUG || generate: NEED_HEADER for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3ca17ee5] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@677aa756[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,529 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,529 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,529 DEBUG || generate: FLUSH for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3ca17ee5] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,529 DEBUG || write: WriteFlusher@3cebe5aa{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>lication/...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@677aa756[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,529 DEBUG || update WriteFlusher@3cebe5aa{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,530 DEBUG || flushed 198 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=COMPLETING}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,530 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@3cebe5aa{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,530 DEBUG || update WriteFlusher@3cebe5aa{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,530 DEBUG || generate: DONE for SendCallback@15a0e2e6[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3ca17ee5] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,530 DEBUG || onWriteComplete(true,null) 
s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,530 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,530 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=17,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,530 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,530 DEBUG || nextAction(false) COMPLETE HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,530 DEBUG || action COMPLETE HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,530 DEBUG || complete(org.eclipse.jetty.util.Callback$3@e7ca9df) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,530 DEBUG || completed HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,530 DEBUG || unhandle HttpChannelState@3c1ae38b{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,530 DEBUG || nextAction(false) TERMINATED HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,530 DEBUG || action TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,530 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,530 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_sink/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:56:31,530 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,530 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,530 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,530 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,530 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,530 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,530 DEBUG || onContentComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,530 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,530 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,530 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,530 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,530 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 
13:56:31,531 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:56:31.530981+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,531 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:56:31.531099+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,531 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:56:31.531066+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,531 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:56:31.531147+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:56:31,531 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,531 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,531 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,531 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,531 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,531 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,531 DEBUG || filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,531 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,531 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,531 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,531 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false 
i=true al=0},r=18,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} GET //naument:8083/connectors/vk_nau27_sink/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,531 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,531 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,531 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/topics HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,531 DEBUG || handling HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,531 DEBUG || action DISPATCH HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,531 DEBUG || REQUEST GET /connectors/vk_nau27_sink/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,532 DEBUG || scope null||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,532 DEBUG || context=||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:56:31,532 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,532 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,532 DEBUG || servlet ||/connectors/vk_nau27_sink/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} 
[org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,532 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:56:31,532 DEBUG || write(array HeapByteBuffer@6dcd6a4a[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,532 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=75 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,533 DEBUG || sendResponse info=null content=HeapByteBuffer@717a0019[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2325b416{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,533 DEBUG || COMMIT for /connectors/vk_nau27_sink/topics on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,533 DEBUG || generate: NEED_HEADER for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@143853b3] (null,[p=0,l=75,c=8192,r=75],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,533 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=75} last=true content=HeapByteBuffer@717a0019[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,533 DEBUG || Date: Fri, 25 Aug 2023 10:56:31 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,533 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:56:31,533 DEBUG || generate: FLUSH for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@143853b3] ([p=0,l=141,c=8192,r=141],[p=0,l=75,c=8192,r=75],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,533 DEBUG || write: WriteFlusher@22fa032{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>lication/...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@717a0019[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,533 DEBUG || update WriteFlusher@22fa032{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,533 DEBUG || flushed 216 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=W,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=COMPLETING}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,533 DEBUG || Flushed=true written=216 remaining=0 
WriteFlusher@22fa032{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,533 DEBUG || update WriteFlusher@22fa032{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:56:31,533 DEBUG || generate: DONE for SendCallback@12c3f4a0[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@143853b3] ([p=141,l=141,c=8192,r=0],[p=75,l=75,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,533 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,533 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:56:31,529 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,530 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,533 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=18,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:56:31,533 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,533 DEBUG || nextAction(false) COMPLETE HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,533 DEBUG || action COMPLETE HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=18,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,533 DEBUG || complete(org.eclipse.jetty.util.Callback$3@1594c57b) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:56:31,533 DEBUG || completed HttpChannelState@27f128fa{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,533 DEBUG || unhandle HttpChannelState@27f128fa{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,533 DEBUG || nextAction(false) TERMINATED HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,533 DEBUG || action TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=18,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,533 DEBUG || onCompleted for /connectors/vk_nau27_sink/topics written=75 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,533 INFO || 10.0.2.141 - - [25/Aug/2023:10:56:31 +0000] "GET /connectors/vk_nau27_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 
13:56:31,533 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,533 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,534 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=18,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=18,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,534 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,534 DEBUG || onContentComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=18,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,534 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,534 DEBUG || onRequestComplete HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=18,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,534 DEBUG || HttpInputOverHTTP@14fa2b5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,534 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of -1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=18,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=END,0 of 
-1},g=HttpGenerator@1feb4af1{s=END}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=18,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || recycle HttpChannelState@27f128fa{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,533 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,533 DEBUG || onRequestComplete HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,534 DEBUG || HttpInputOverHTTP@5bd07817[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:56:31,534 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=6} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=END,0 of -1},g=HttpGenerator@48abe408{s=END}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=17,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=6} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,534 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,534 DEBUG || recycle HttpChannelState@3c1ae38b{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:56:31,534 DEBUG || !handle TERMINATED HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,534 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,534 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,534 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,534 DEBUG || !handle TERMINATED HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:56:31,534 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 
...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,534 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,534 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,534 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,534 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,534 DEBUG || fillInterested 
HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,534 DEBUG || interested FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,534 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,534 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,534 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,534 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,535 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:56:31,535 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,535 DEBUG || 
HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,535 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,535 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,535 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:56:31,535 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,535 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:56:31,535 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,535 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on 
ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,535 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:56:31,535 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,535 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:56:31,535 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:56:31,535 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,535 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:56:31,683 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer 
[org.apache.kafka.clients.consumer.internals.Heartbeat]
2023-08-25 13:56:31,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator]
2023-08-25 13:56:31,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=118, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:31,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:31,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=118, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:31,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator]
2023-08-25 13:56:31,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:31,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:31,697 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:31,697 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
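The exchange above is the consumer-group liveness protocol for the sink task's embedded consumer (group connect-vk_nau27_sink): a background heartbeat thread pings the group coordinator on broker1:29092 (id 2147483646) roughly every three seconds while the fetch loop keeps polling. The same knobs can be set on any standalone consumer; below is a minimal sketch with the kafka-python client, where the group id, topic choice and timeout values are assumptions picked to mirror what the log shows rather than the connector's actual settings:

# Illustrative only: a throwaway consumer whose heartbeat/fetch settings mirror
# the values visible in this log (3 s heartbeat timer, FETCH maxWaitMs=500, minBytes=1).
# The group id is hypothetical so it does not join the live connect-vk_nau27_sink group.
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    "vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27",
    bootstrap_servers=["broker1:29092", "broker2:29092", "broker3:29092"],
    group_id="vk_nau27-debug-reader",   # hypothetical group, not the connector's
    heartbeat_interval_ms=3000,         # heartbeats every 3 s, consistent with the timer above
    session_timeout_ms=30000,           # assumed; coordinator evicts the member after this
    fetch_max_wait_ms=500,              # matches maxWaitMs=500 in the FETCH requests below
    fetch_min_bytes=1,                  # matches minBytes=1 in the FETCH requests below
    auto_offset_reset="earliest",
    enable_auto_commit=False,
)
for record in consumer:
    print(record.topic, record.partition, record.offset)

If the coordinator misses heartbeats for longer than the session timeout, it evicts the member and triggers a rebalance; the successful HEARTBEAT responses above are what keep the sink task's membership alive.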
2023-08-25 13:56:31,697 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=117, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:31,697 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:31,697 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
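The FetchSessionHandler records above, and the "Built incremental fetch (sessionId=1637453888, epoch=85) ... Added (), altered (), removed (), replaced ()" line just below, show incremental fetch sessions (KIP-227) at work: after the first full FETCH, the consumer re-sends only partitions whose fetch position changed, and the broker treats the unchanged ones as implied by the session, which is why both the request topic list and responses=[] are empty here. A toy sketch of that bookkeeping, purely illustrative and far simpler than the real client code:

# Toy illustration of an incremental fetch session; not the real FetchSessionHandler.
class ToyFetchSession:
    def __init__(self, session_id):
        self.session_id = session_id
        self.epoch = 0
        self.session_partitions = {}   # partition -> fetch offset the broker already knows

    def build(self, wanted):
        """wanted: dict of partition -> next fetch offset the consumer needs."""
        to_send = {tp: off for tp, off in wanted.items()
                   if self.session_partitions.get(tp) != off}
        to_forget = [tp for tp in self.session_partitions if tp not in wanted]
        implied = [tp for tp in wanted if tp not in to_send]
        self.session_partitions = dict(wanted)
        self.epoch += 1
        return {"sessionId": self.session_id, "epoch": self.epoch,
                "toSend": to_send, "toForget": to_forget, "implied": implied}

session = ToyFetchSession(1637453888)
# The first request registers the partition; the next one, with an unchanged
# position, sends nothing and leaves the partition implied, which is the shape logged here.
print(session.build({"vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0": 2001}))
print(session.build({"vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0": 2001}))

With the position stuck at offset 2001 and no new data arriving, every round trip just bumps the session epoch and implies the same partition, matching the empty responses=[] seen above.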
2023-08-25 13:56:31,698 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:31,697 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,698 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=85) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:31,698 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:31,698 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,698 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,698 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2986 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:31,698 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:31,698 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=119, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=85, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:31,698 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:31,698 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
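The ChangeEventQueue lines ending the block above ("polling records... / no records available or batch size not reached yet, sleeping a bit...") are the Debezium source task draining its in-memory queue: the poll loop sleeps whenever fewer than max.batch.size events are buffered. Those thresholds are ordinary connector properties and can be adjusted through the Connect REST interface on naument:8083; a hedged sketch follows, in which poll.interval.ms, max.batch.size and max.queue.size are standard Debezium options but the values chosen are assumptions for illustration:

# Sketch: read the current vk_nau27_src configuration and merge in queue/batch tuning.
# The connector name and host come from this log; the numbers are illustrative.
import requests

BASE = "http://naument:8083"   # Kafka Connect REST listener
NAME = "vk_nau27_src"          # Debezium Oracle source connector

current = requests.get(f"{BASE}/connectors/{NAME}/config", timeout=10)
current.raise_for_status()

tuned = {
    **current.json(),
    "poll.interval.ms": "500",   # sleep between polls when no records are ready
    "max.batch.size": "2048",    # events handed to Kafka Connect per poll
    "max.queue.size": "8192",    # bound on the in-memory change event queue
}

resp = requests.put(f"{BASE}/connectors/{NAME}/config", json=tuned, timeout=10)
resp.raise_for_status()
print(resp.status_code)          # 200 when updating an existing connector

Raising max.batch.size favors throughput and lowering poll.interval.ms favors latency; the sleeping message itself is harmless and only means the queue was empty when the task last polled.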
2023-08-25 13:56:31,698 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,698 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2985 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:31,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1903, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:31,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:31,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:31,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=630) for node 3.
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,708 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417668 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,708 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1906, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=630, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,709 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,709 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,709 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,709 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,709 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,709 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417667 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1895, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=629) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383490 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,762 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1898, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=629, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,762 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383490 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,794 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1904, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,794 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,794 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=632) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,794 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417582 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,794 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1907, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=632, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,794 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,794 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417582 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,798 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=640, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,798 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,798 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,798 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,798 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=634) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,798 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,798 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,798 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147416555 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,798 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=641, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=634, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,798 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,798 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147416555 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1905, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=633) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417554 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1908, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=633, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417554 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1896, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=629) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383403 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,849 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1899, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=629, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,849 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383403 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1897, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=629) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:31,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383398 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1900, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=629, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:31,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147383397 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:31,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:56:31,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=119, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=86) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,198 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,199 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,199 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2485 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,199 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=120, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=86, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,198 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,199 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,199 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:32,199 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:32,199 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,199 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,199 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,199 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2484 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,209 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1906, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,209 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,209 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=631) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,209 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417167 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,209 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1909, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=631, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,209 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,209 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417167 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,262 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1898, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,262 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,262 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,262 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=630) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,263 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382990 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,263 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1901, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=630, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,263 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382989 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,295 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1907, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,295 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,295 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,295 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,295 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,295 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,295 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,295 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,295 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=633) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,295 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,295 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,295 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417081 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,295 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1910, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=633, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,296 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,296 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,296 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,296 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,296 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,296 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417080 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,299 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=641, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,299 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,299 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,299 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,299 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=635) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,299 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,299 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,299 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147416054 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,299 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=642, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=635, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,299 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,299 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147416054 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1908, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=634) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417053 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1911, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=634, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147417053 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1899, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,349 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,349 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=630) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382903 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,350 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1902, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=630, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,350 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382902 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1900, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=630) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382897 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,355 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1903, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=630, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,355 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382897 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,699 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=120, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,699 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,699 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=87) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,699 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1984 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=121, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=87, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,699 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1984 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,699 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,700 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,700 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:32,700 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:32,700 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,700 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:32,710 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1909, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,710 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,710 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=632) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,710 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416666 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,710 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1912, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=632, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,710 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,710 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416666 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1901, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=631) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,763 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382489 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,763 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1904, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=631, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382488 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,796 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1910, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
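All of the FETCH request/response, incremental fetch session and "Skipping fetch ... has not been processed" bookkeeping above happens inside the Kafka consumer client itself; the worker only calls poll() on its internal consumers (naument--offsets, naument--statuses, naument--configs). A plain consumer against the same cluster produces the same kind of entries at DEBUG/TRACE level. The sketch below is illustrative only: the broker list and topic name are copied from the log, while the class name, group id and client id are assumptions.

import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class StatusTopicTailer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-log-tailer");        // assumed group id, not the worker's
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--statuses-demo");   // assumed client id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status"));
            while (true) {
                // Each poll() drives the fetch layer that logs
                // "Added READ_UNCOMMITTED fetch request ..." and
                // "Built incremental fetch (sessionId=..., epoch=...)";
                // the incremental fetch session is managed entirely by the client.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
            }
        }
    }
}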
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,796 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,796 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=634) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,796 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416580 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,796 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1913, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=634, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,796 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,796 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416580 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,799 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=642, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,799 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,799 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,799 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,799 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=636) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,799 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,799 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,800 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147415554 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,800 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=643, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=636, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,800 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,800 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147415553 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,823 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1911, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
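Three internal topics are visible in this trace: naument-config (a single partition, read by naument--configs), naument-offset (partitions 0..24, naument--offsets) and naument-status (partitions 0..4, naument--statuses). They correspond to the storage topics of a distributed Connect worker. The snippet below reconstructs the relevant worker properties from the names and partition counts seen in the log; the values are inferred, and the actual worker configuration may differ.

# connect-distributed.properties (reconstructed, illustrative only)
group.id=naument
bootstrap.servers=broker1:29092,broker2:29092,broker3:29092

config.storage.topic=naument-config
offset.storage.topic=naument-offset
status.storage.topic=naument-status

# Partition counts consistent with naument-config-0, naument-offset-0..24
# and naument-status-0..4 above (only applied if the worker creates the topics).
offset.storage.partitions=25
status.storage.partitions=5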
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=635) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416553 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1914, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=635, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416552 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1902, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=631) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382402 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,850 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1905, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=631, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
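Every FETCH request in this log carries maxWaitMs=500, minBytes=1 and maxBytes=52428800, which are simply the consumer defaults for fetch.max.wait.ms, fetch.min.bytes and fetch.max.bytes (50 MB). If these consumers ever needed tuning, those are the settings involved; the overrides below are purely illustrative values, not taken from this installation, and show how the broker could be made to wait for more data before answering a fetch.

# Defaults, as reflected in FetchRequestData above:
#   fetch.max.wait.ms=500
#   fetch.min.bytes=1
#   fetch.max.bytes=52428800
# Illustrative overrides (assumed values):
fetch.max.wait.ms=1000
fetch.min.bytes=65536
fetch.max.bytes=52428800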
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,850 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,851 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382402 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1903, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=631) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382396 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:32,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1906, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=631, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:32,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147382396 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=121, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=88) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,200 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,201 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,201 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,201 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,201 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1483 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,201 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:33,201 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=122, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=88, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,201 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:33,201 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,201 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,201 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,201 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1482 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,211 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1912, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,211 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,211 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,211 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,211 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,211 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,211 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,211 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,211 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=633) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,211 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,211 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,211 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416165 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,211 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1915, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=633, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,211 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,211 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,211 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,212 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,212 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,212 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416165 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1904, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=632) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381988 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,264 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1907, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=632, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,264 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381988 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,297 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1913, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,297 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,297 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=635) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,298 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416079 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,298 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1916, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=635, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,298 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,298 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416078 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,299 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:33,301 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:33,301 TRACE Oracle|vk_nau27|streaming Current time 1692960993301 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:33,301 TRACE 
Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:33,301 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=643, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,301 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,301 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,301 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,301 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=637) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,301 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,301 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,301 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147415052 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,301 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=644, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=637, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,301 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,301 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147415052 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,302 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 2800. Min sleep time 0. Max sleep time 3000. 
[io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics]
2023-08-25 13:56:33,302 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931598806 as end SCN. [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource]
2023-08-25 13:56:33,302 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:33,304 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931598807' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:33,310 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:33,311 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931598471, endScn=289931598806, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource]
2023-08-25 13:56:33,311 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931598472', endScn => '289931598806', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:33,320 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931598471, 289931598806] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:56:33,324 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1914, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:33,324 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:33,324 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:33,324 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request
for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,324 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=636) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,324 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,324 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416052 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,324 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1917, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=636, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,324 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147416052 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with 
header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1905, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, 
currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=632) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381901 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,351 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1908, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=632, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,351 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381901 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1906, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=632) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381896 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1909, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=632, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381895 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598476, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='13001f00edb22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b0ba.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Transaction 13001f00edb22b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598479, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0d001b000dd52900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b0c0.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Transaction 0d001b000dd52900 has 0 
events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598482, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='05001f00cc712b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b0c3.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Transaction 05001f00cc712b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598484) LogMinerEventRow{scn=289931598485, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0f00170039f12900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b0c8.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Transaction 0f00170039f12900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598488, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='1400140010ae2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b130.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Transaction 1400140010ae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598491, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0a001500e71c3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b132.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Transaction 0a001500e71c3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598493) LogMinerEventRow{scn=289931598494, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='03001a008eb42800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b137.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Transaction 03001a008eb42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,478 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598497, 
tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='05000c0072762b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b16f.01b0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 05000c0072762b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598500, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='08001a00345d2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b172.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 08001a00345d2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598502) LogMinerEventRow{scn=289931598503, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='12001d001b4d2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b177.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 12001d001b4d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598506, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0e000e0090462900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b1b6.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0e000e0090462900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598509, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='060012009d0a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b1b8.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 060012009d0a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598511) LogMinerEventRow{scn=289931598512, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='11002000512d2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', 
rollbackFlag=false, rsId=0x0098e8.0002b1bd.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 11002000512d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598515, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0f00010010f02900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b200.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0f00010010f02900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598518, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='01001b0048b82500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b202.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 01001b0048b82500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598521, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0b000e00fe0a2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b245.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0b000e00fe0a2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598524, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='14001e0026a82b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b248.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 14001e0026a82b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598527, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='07001d0009c92d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b2e2.01c8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 
07001d0009c92d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598530, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='09001400d3eb2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b2e5.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 09001400d3eb2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598532) LogMinerEventRow{scn=289931598533, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0a001d00001e3100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b2ea.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0a001d00001e3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598536, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0d00150003d72900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b31d.0138, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0d00150003d72900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598539, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0300140056b42800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b321.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0300140056b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598542, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0400110035ce2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b324.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0400110035ce2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) 
LogMinerEventRow{scn=289931598545, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='05001e00a36f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b35c.00a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 05001e00a36f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598548, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0f00140085e72900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b35e.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0f00140085e72900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598550) LogMinerEventRow{scn=289931598551, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='080015006d5a2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b363.0048, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 080015006d5a2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598554, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='13000b00fdb22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b3de.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 13000b00fdb22b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598557, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='1400130080b12b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b3e1.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 1400130080b12b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598560, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='10000b0080352a00', operation='COMMIT', userName='DEBEZIUM', 
rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b496.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 10000b0080352a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598563, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='06000300b9082a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b49a.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 06000300b9082a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598566, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='01001c0058b82500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b49e.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 01001c0058b82500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598569, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='120013006e512a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b4a1.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 120013006e512a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598572, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0b001100250b2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b4a5.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0b001100250b2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598575, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='10001900fa3b2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b4a9.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE 
Oracle|vk_nau27|streaming Transaction 10001900fa3b2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598577) LogMinerEventRow{scn=289931598578, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0d000d006edf2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b4de.0060, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0d000d006edf2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598580, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0d001f00a4d32900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b4ea.002c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0d001f00a4d32900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598583, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0c00020007542b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b4ee.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 0c00020007542b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598586, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='02000a0065cb2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b4f2.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,479 TRACE Oracle|vk_nau27|streaming Transaction 02000a0065cb2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598588) LogMinerEventRow{scn=289931598589, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0a000f00cd233100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b556.0024, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0a000f00cd233100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit 
(smallest SCN null) LogMinerEventRow{scn=289931598591, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0a000900c4193100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b57f.0050, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0a000900c4193100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598594, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='06000d003bff2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b583.01e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 06000d003bff2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598597, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0e001f001f492900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b587.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0e001f001f492900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598600, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='11000e0044282b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b598.01b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 11000e0044282b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598603, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='050018006d6e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b59d.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 050018006d6e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598606, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0f000f0076f22900', operation='COMMIT', 
userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b5a1.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0f000f0076f22900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598609, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0300210087b42800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b5b2.013c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0300210087b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598612, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='08001b00fa5e2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b5b6.0178, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 08001b00fa5e2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598615, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='14001000a8b42b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b5ba.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 14001000a8b42b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598618, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='12001e00c64b2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b5cb.0154, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 12001e00c64b2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598621, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='01000100e0b82500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b5d0.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE 
Oracle|vk_nau27|streaming Transaction 01000100e0b82500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598624, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0b001d009c0d2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b5d4.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0b001d009c0d2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598626) LogMinerEventRow{scn=289931598627, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0a0014002f223100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b5fe.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0a0014002f223100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598628, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0a0008001a213100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b647.0184, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0a0008001a213100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598631, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0d00000084d62900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b64c.0180, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0d00000084d62900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598633, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='09001b007fef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b64f.0134, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 09001b007fef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 
null) LogMinerEventRow{scn=289931598636, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='070009002cc42d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b669.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 070009002cc42d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598639, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='040004007fd22a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b66d.0120, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 040004007fd22a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598642, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0c000b00db572b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b670.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0c000b00db572b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598645, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='10000500ad2d2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b68e.003c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 10000500ad2d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598648, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='06001f002b022a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b692.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 06001f002b022a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598651, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='13001600bdb62b00', operation='COMMIT', userName='DEBEZIUM', 
rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b697.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 13001600bdb62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598654, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='05002100f2702b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b69c.0178, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 05002100f2702b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598657, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0e002100cc472900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b6a0.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0e002100cc472900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598660, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0f00120050ed2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b6c4.0080, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 0f00120050ed2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598663, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='02001300dbd02700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b6c7.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 02001300dbd02700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598665) LogMinerEventRow{scn=289931598666, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0900100051ef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b6e5.0068, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE 
Oracle|vk_nau27|streaming Transaction 0900100051ef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598665) LogMinerEventRow{scn=289931598668, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='09001d00bbea2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b728.0064, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 09001d00bbea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598670, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='09000f00a2ee2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b745.0130, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Transaction 09000f00a2ee2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,480 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598673, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='03001f00c4ad2800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b748.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 03001f00c4ad2800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598676, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='14001100e2ac2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b755.01dc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 14001100e2ac2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598679, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='11001e00de2d2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b75a.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 11001e00de2d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 
null) LogMinerEventRow{scn=289931598682, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='08000b00345f2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b75e.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 08000b00345f2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598685, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:29Z, transactionId='0a00010033233100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b76b.0140, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0a00010033233100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598687) LogMinerEventRow{scn=289931598688, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='1200030039472a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b77d.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 1200030039472a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598687) LogMinerEventRow{scn=289931598691, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0500200007762b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b781.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0500200007762b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598687) LogMinerEventRow{scn=289931598694, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='04001500abce2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b791.0010, ssn=0, redoSql='commit;'} 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 04001500abce2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598697, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0100210024ba2500', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b799.00bc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0100210024ba2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598700, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='13001000cab32b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b79d.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 13001000cab32b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598703, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0f00040007ed2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b7a1.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0f00040007ed2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598706, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='14000400feb12b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b7a5.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 14000400feb12b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598709, tableId='null', tableName='null', 
tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0b000800570b2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b7c2.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0b000800570b2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598712, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0e000c003a422900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b7c5.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0e000c003a422900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598715, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0a001f0002213100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b7ef.0038, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0a001f0002213100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598717) LogMinerEventRow{scn=289931598718, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0d0003003cdb2900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b7fc.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0d0003003cdb2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598717) LogMinerEventRow{scn=289931598721, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='100008004c362a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b806.01a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 100008004c362a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598717) LogMinerEventRow{scn=289931598724, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='090005009bef2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b80b.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 090005009bef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598727, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='05000e00cc6f2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b80d.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 05000e00cc6f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598730, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0f00110006ee2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b810.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0f00110006ee2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598732) LogMinerEventRow{scn=289931598733, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0700200003c52d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b821.0138, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0700200003c52d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598732) LogMinerEventRow{scn=289931598736, 
tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='06001d0095072a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b824.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 06001d0095072a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598732) LogMinerEventRow{scn=289931598739, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='04001c00d8cf2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b839.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 04001c00d8cf2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598742, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='03001c00acb22800', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b84e.009c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 03001c00acb22800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
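Each "Commit (smallest SCN ...)" entry is immediately followed by "Transaction <id> has N events": the event processor buffers DML rows per transaction id and only emits them when the COMMIT row for that transaction arrives; in this capture every commit flushes an empty buffer because all DML was filtered out. A minimal sketch of that buffering pattern, assuming a plain in-memory map (hypothetical names, not the connector's real classes):

  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  // Buffer DML events per transaction id and flush them only when the COMMIT row arrives.
  public class TransactionBufferSketch {
      private final Map<String, List<String>> buffers = new HashMap<>();

      void onDml(String txId, String redoSql) {
          buffers.computeIfAbsent(txId, id -> new ArrayList<>()).add(redoSql);
      }

      void onCommit(String txId) {
          List<String> events = buffers.remove(txId);
          int count = events == null ? 0 : events.size();
          System.out.println("Transaction " + txId + " has " + count + " events");
          // A real processor would hand the buffered events to the change event queue here.
      }

      public static void main(String[] args) {
          TransactionBufferSketch buffer = new TransactionBufferSketch();
          buffer.onDml("0b000800570b2f00", "update DEBEZIUM.GBC_TBL_SERVICECALL_NC27 set ...");
          buffer.onCommit("0b000800570b2f00"); // 1 event
          buffer.onCommit("0e000c003a422900"); // 0 events: nothing was buffered for this transaction
      }
  }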
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598744) LogMinerEventRow{scn=289931598745, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='08000000775b2e00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b85e.0180, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 08000000775b2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598748, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0100170083bc2500', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b860.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0100170083bc2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598751, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0b001f009f072f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b864.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0b001f009f072f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598754, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0c001b0045572b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b868.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,481 TRACE Oracle|vk_nau27|streaming Transaction 0c001b0045572b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598757, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='1300090071b22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b879.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 1300090071b22b00 
has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598760, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='10001400eb2a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b87e.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 10001400eb2a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598763, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='14001600a5ad2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b882.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 14001600a5ad2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598766, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='02002100cdca2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b893.0150, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 02002100cdca2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598769, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='12001700664a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b897.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 12001700664a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598772, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='11001a00bc312b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b89b.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 11001a00bc312b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598775, tableId='null', 
tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0e00140062422900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b8ae.00b4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 0e00140062422900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SEQUENCE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598778, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0a0003000d203100', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b8b0.00d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 0a0003000d203100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598781, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='070019009ed02d00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b8b7.003c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 070019009ed02d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SEQUENCE' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598784, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0500160083172b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b8b9.00d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 0500160083172b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598787, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0c0009006b5b2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b8c0.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 0c0009006b5b2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598790, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='06001b003e042a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b8d0.0140, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 06001b003e042a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598793, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0f000c0084f02900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b8df.0058, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 0f000c0084f02900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598796, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='01001a0098b62500', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b8ed.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 01001a0098b62500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598799, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:30Z, transactionId='0d001e0035d42900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b8fd.0114, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 TRACE Oracle|vk_nau27|streaming Transaction 0d001e0035d42900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 DEBUG Oracle|vk_nau27|streaming Counters{rows=253, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=111, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:33,482 DEBUG Oracle|vk_nau27|streaming Processed in 4 ms. Lag: 3482. 
Offset SCN: 289931598471, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931598799, txIds=[0d001e0035d42900]}}], Active Transactions: 0, Sleep: 2800 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:56:33,483 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:33,486 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:33,490 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:33,494 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:33,497 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
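The four "running 'SELECT VALUE FROM v$statname ...'" statements above feed the "Oracle Session UGA ... PGA ..." summary: after each batch the connector reads its own session memory statistics. The same figures can be read with plain JDBC; the query below is the parameterised form of the statement in the log, while the connection URL and credentials are placeholders and the Oracle JDBC driver is assumed to be on the classpath.

  import java.sql.Connection;
  import java.sql.DriverManager;
  import java.sql.PreparedStatement;
  import java.sql.ResultSet;

  // Read a single Oracle session statistic (e.g. 'session pga memory') for the current session.
  public class SessionMemorySketch {
      static long sessionStat(Connection conn, String statName) throws Exception {
          String sql = "SELECT VALUE FROM v$statname n, v$mystat m "
                     + "WHERE n.name = ? AND n.statistic# = m.statistic#";
          try (PreparedStatement ps = conn.prepareStatement(sql)) {
              ps.setString(1, statName);
              try (ResultSet rs = ps.executeQuery()) {
                  return rs.next() ? rs.getLong(1) : -1L;
              }
          }
      }

      public static void main(String[] args) throws Exception {
          // Placeholder URL and credentials; requires the Oracle JDBC driver on the classpath.
          try (Connection conn = DriverManager.getConnection(
                  "jdbc:oracle:thin:@//dbhost:1521/ORCLPDB1", "debezium", "secret")) {
              System.out.printf("UGA: %d bytes (max %d), PGA: %d bytes (max %d)%n",
                      sessionStat(conn, "session uga memory"),
                      sessionStat(conn, "session uga memory max"),
                      sessionStat(conn, "session pga memory"),
                      sessionStat(conn, "session pga memory max"));
          }
      }
  }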
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=122, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
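The long run of "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." pairs is the change event queue's poll loop: the task thread drains an in-memory queue until either a full batch or a timeout is reached, sleeping briefly whenever the queue is empty. A stripped-down sketch of that pattern, assuming a plain BlockingQueue (queue capacity, batch size and sleep interval are illustrative, not the connector's configured values):

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.BlockingQueue;
  import java.util.concurrent.LinkedBlockingQueue;
  import java.util.concurrent.TimeUnit;

  // Drain up to maxBatchSize records, sleeping briefly while nothing is available.
  public class ChangeEventQueueSketch {
      private final BlockingQueue<String> queue = new LinkedBlockingQueue<>(8192);
      private final int maxBatchSize = 2048;

      List<String> poll(long timeoutMs) throws InterruptedException {
          List<String> batch = new ArrayList<>();
          long deadline = System.currentTimeMillis() + timeoutMs;
          while (batch.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
              // "checking for more records..."
              String record = queue.poll(25, TimeUnit.MILLISECONDS);
              if (record == null) {
                  continue; // "no records available or batch size not reached yet, sleeping a bit..."
              }
              batch.add(record);
          }
          return batch;
      }

      public static void main(String[] args) throws InterruptedException {
          ChangeEventQueueSketch q = new ChangeEventQueueSketch();
          q.queue.offer("change-event-1");
          System.out.println(q.poll(100)); // prints [change-event-1] once the 100 ms window elapses
      }
  }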
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,701 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,702 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:33,702 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=89) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,702 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. 
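In parallel, the sink connector's consumer (clientId connector-consumer-vk_nau27_sink-0) keeps fetching partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 from broker 3 at offset 2001 with READ_UNCOMMITTED isolation. A minimal standalone consumer with the same isolation level and topic would look roughly like the sketch below; the bootstrap server and group id are placeholders, not the connector's actual settings.

  import java.time.Duration;
  import java.util.List;
  import java.util.Properties;
  import org.apache.kafka.clients.consumer.ConsumerConfig;
  import org.apache.kafka.clients.consumer.ConsumerRecord;
  import org.apache.kafka.clients.consumer.ConsumerRecords;
  import org.apache.kafka.clients.consumer.KafkaConsumer;
  import org.apache.kafka.common.serialization.StringDeserializer;

  // Standalone consumer reading the connector's topic with READ_UNCOMMITTED isolation.
  public class SinkTopicConsumerSketch {
      public static void main(String[] args) {
          Properties props = new Properties();
          props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker3:29092"); // placeholder bootstrap server
          props.put(ConsumerConfig.GROUP_ID_CONFIG, "inspect-vk_nau27");       // placeholder group id
          props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
          props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
          props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
          props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

          try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
              consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
              // Each poll() drives the same fetch-session machinery seen in the log above.
              ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
              for (ConsumerRecord<String, String> record : records) {
                  System.out.printf("partition=%d offset=%d key=%s%n",
                          record.partition(), record.offset(), record.key());
              }
          }
      }
  }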
Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:33,702 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,702 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,702 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,702 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:33,702 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 982 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,702 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=123, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=89, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,702 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,702 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 981 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,711 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:33,711 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:33,711 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=121, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,711 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=121, 
headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,711 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:33,712 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1915, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,712 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,712 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=634) for node 3. 
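The naument--statuses and sink consumers above all rely on incremental fetch sessions: the broker remembers which partitions belong to session 861588726 (or 1637453888 for the sink consumer), so each follow-up request only names partitions that were added, removed or replaced, and the unchanged ones travel as "implied". The sketch below is a conceptual model of that bookkeeping, not Kafka's FetchSessionHandler; the session id is copied from the log purely for illustration.

  import java.util.HashSet;
  import java.util.Set;

  // Conceptual model of an incremental fetch session: the broker remembers the
  // session's partitions, so later requests only name what changed.
  public class FetchSessionSketch {
      private final int sessionId = 1637453888; // from the log, for illustration only
      private int epoch = 0;
      private final Set<String> sessionPartitions = new HashSet<>();

      String buildRequest(Set<String> wanted) {
          Set<String> toSend = new HashSet<>(wanted);
          toSend.removeAll(sessionPartitions);      // newly added partitions
          Set<String> toForget = new HashSet<>(sessionPartitions);
          toForget.removeAll(wanted);               // partitions dropped from the session
          Set<String> implied = new HashSet<>(wanted);
          implied.retainAll(sessionPartitions);     // unchanged partitions, not resent
          sessionPartitions.clear();
          sessionPartitions.addAll(wanted);
          epoch++;
          return "IncrementalFetchRequest(sessionId=" + sessionId + ", epoch=" + epoch
                  + ", toSend=" + toSend + ", toForget=" + toForget + ", implied=" + implied + ")";
      }

      public static void main(String[] args) {
          FetchSessionSketch session = new FetchSessionSketch();
          Set<String> partitions = Set.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0");
          System.out.println(session.buildRequest(partitions)); // first epoch names the partition in toSend
          System.out.println(session.buildRequest(partitions)); // later epochs carry it as implied only
      }
  }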
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,712 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415664 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,712 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1918, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=634, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,712 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,712 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415664 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,764 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1907, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,764 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,764 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,764 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,764 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=633) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381488 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,765 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1910, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=633, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,765 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381487 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,799 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1916, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,799 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,799 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=636) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,799 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415577 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,799 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1919, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=636, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,799 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,799 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415577 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,801 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=644, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,802 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,802 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,802 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,802 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=638) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,802 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,802 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,802 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147414551 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,802 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=645, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=638, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,802 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,802 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147414551 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1917, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=637) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415551 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1920, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=637, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415551 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1908, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=633) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381400 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,852 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1911, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=633, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,852 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381400 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1909, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=633) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381394 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:33,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1912, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=633, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:33,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147381394 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,202 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=123, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,202 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=90) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,202 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,202 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,203 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,203 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 481 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,203 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:34,203 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=124, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=90, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,203 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:34,203 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,203 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,203 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,203 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 480 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1918, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=635) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415163 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1921, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=635, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,213 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,213 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415163 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1910, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=634) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380987 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,265 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1913, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=634, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,265 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380987 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,299 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1919, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,299 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,299 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=637) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,300 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415077 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,300 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1922, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=637, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,300 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,300 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415076 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,302 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=645, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,302 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,302 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,302 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,302 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=639) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,302 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,302 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,302 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147414051 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,302 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=646, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=639, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,302 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,302 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147414051 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1920, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=638) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415051 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1923, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=638, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147415051 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1911, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=634) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380899 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,353 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1914, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=634, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
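The three client ids polling in these entries, naument--configs, naument--offsets (25 partitions) and naument--statuses (5 partitions), are the Connect worker's readers of its internal config, offset and status storage topics for group naument. The sketch below simply prints the distributed-worker settings those names imply; the topic names, partition counts, group id and brokers are taken from the log, while the converters are assumptions.

import java.util.LinkedHashMap;
import java.util.Map;

public class WorkerConfigSketch {
    public static void main(String[] args) {
        // Distributed-worker settings implied by the log: group id "naument"
        // and the internal topics naument-config / naument-offset / naument-status.
        // The partition counts match what the consumers above are fetching.
        Map<String, String> worker = new LinkedHashMap<>();
        worker.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
        worker.put("group.id", "naument");
        worker.put("config.storage.topic", "naument-config");
        worker.put("offset.storage.topic", "naument-offset");
        worker.put("offset.storage.partitions", "25");
        worker.put("status.storage.topic", "naument-status");
        worker.put("status.storage.partitions", "5");
        // Converters are an assumption; they are not visible in this part of the log.
        worker.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
        worker.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");

        worker.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}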
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,353 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380899 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1912, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=634) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,359 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380894 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,359 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1915, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=634, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,359 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380893 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,683 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:34,683 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:34,683 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=125, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,684 
TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=125, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:34,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
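The heartbeat entries just above go to "coordinator broker1:29092 (id: 2147483646 rack: null)". That id is not a broker id: the consumer derives a synthetic connection id for the group coordinator as Integer.MAX_VALUE minus the broker id, so that coordinator traffic uses its own connection. A one-line check of the arithmetic:

public class CoordinatorId {
    public static void main(String[] args) {
        int brokerId = 1; // broker1, the group coordinator in the log
        // The client addresses the coordinator as Integer.MAX_VALUE - brokerId,
        // which is why the HEARTBEAT request is sent to node 2147483646.
        System.out.println(Integer.MAX_VALUE - brokerId); // prints 2147483646
    }
}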
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=124, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,703 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,703 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,704 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,704 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=91) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:34,704 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:34,704 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:34,704 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:34,704 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2980 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:34,704 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:34,704 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=126, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=91, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:34,704 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:34,704 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:34,704 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,704 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2979 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,713 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1921, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,713 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,713 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,713 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,713 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,713 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,713 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,713 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,713 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=636) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,713 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,713 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,713 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414663 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,713 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1924, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=636, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414662 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1913, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=635) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380486 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,766 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1916, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=635, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,766 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380486 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,800 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1922, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,800 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,800 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=638) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,800 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414576 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,800 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1925, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=638, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,800 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,800 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414576 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,803 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=646, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,803 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,803 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,803 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,803 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=640) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,803 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,803 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,803 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147413550 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,803 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=647, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=640, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,803 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,803 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147413550 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1923, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=639) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414551 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,825 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1926, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=639, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,825 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414551 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1914, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=635) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,853 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380399 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,854 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1917, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=635, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,854 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380398 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1915, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=635) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380393 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:34,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1918, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=635, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:34,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147380392 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,094 TRACE || com.mchange.v2.async.ThreadPoolAsynchronousRunner$DeadlockDetector@26cb9b7a -- Running DeadlockDetector[Exiting. No pending tasks.] [com.mchange.v2.async.ThreadPoolAsynchronousRunner] 2023-08-25 13:56:35,204 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=126, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,205 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
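The long run of "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." pairs above is the vk_nau27 source task draining an empty io.debezium.connector.base.ChangeEventQueue: the poller keeps checking whether a batch of change events is ready and sleeps briefly when it is not. As a rough, hypothetical analogue of that batching-poll pattern (plain java.util.concurrent, illustrative names, not Debezium's actual implementation):

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of a batching change-event queue; names and sizes are illustrative only.
public class BatchingQueueSketch<T> {
    private final BlockingQueue<T> queue = new LinkedBlockingQueue<>(8192); // bounded queue
    private final int maxBatchSize = 2048;
    private final Duration pollInterval = Duration.ofMillis(500);

    // Producer side: the connector's streaming thread would enqueue captured change events here.
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Consumer side: hand back a batch once records are available, otherwise wait a bit and
    // check again -- the behaviour the repeated DEBUG messages describe.
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        while (batch.isEmpty()) {
            // "checking for more records..."
            queue.drainTo(batch, maxBatchSize);
            if (batch.isEmpty()) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                T first = queue.poll(pollInterval.toMillis(), TimeUnit.MILLISECONDS);
                if (first != null) {
                    batch.add(first);
                    queue.drainTo(batch, maxBatchSize - 1);
                }
            }
        }
        return batch;
    }
}

In Kafka Connect terms, the source task's poll() call ends up draining whatever batch this kind of queue returns; while the Oracle side produces nothing, the loop simply spins through check/sleep as seen in the trace.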
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,204 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,205 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:35,205 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:35,205 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,205 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,205 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,205 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=92) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,205 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,205 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,205 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2479 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,205 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=127, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=92, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,205 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:56:35,205 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2478 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1924, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=637) for node 3. 
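The FETCH requests in this trace all carry maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED); these are the defaults of fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. The naument--offsets and naument--statuses clients are Kafka Connect's internal readers of the worker group's offset and status topics, so none of this is user code, but a minimal stand-alone consumer configured with the same values would look roughly like the sketch below (topic name naument-offset inferred from the partition names in the log; purely illustrative):

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetsTopicReaderSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // These map directly onto the FetchRequestData fields seen in the log:
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);              // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                  // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);           // maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-offset"));
            while (true) {
                // Corresponds to the "Polling for fetches with timeout ..." TRACE lines.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
            }
        }
    }
}

On the wire, isolationLevel=0 means READ_UNCOMMITTED and 1 would mean READ_COMMITTED; the empty topics=[] / forgottenTopicsData=[] fields are simply the incremental-fetch case where the session (sessionId/sessionEpoch) already implies the partition set.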
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414162 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1927, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=637, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414162 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1916, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=636) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,266 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379986 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,267 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1919, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=636, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,267 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379985 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,300 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1925, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,300 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,300 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=639) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,301 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414076 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,301 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1928, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=639, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,301 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,301 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414075 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,303 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=647, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,304 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,304 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,304 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,304 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=641) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,304 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,304 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,304 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147413049 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,304 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=648, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=641, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,304 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,304 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147413049 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,326 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1926, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,326 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,326 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=640) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,326 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414050 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,326 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1929, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=640, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,326 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147414050 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1917, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=636) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379898 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,354 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1920, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=636, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,354 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379898 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1918, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=636) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379891 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,361 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1921, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=636, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379891 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,704 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=127, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,705 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,705 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,705 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,705 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=93) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,705 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,705 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,705 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1978 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,705 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=128, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=93, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,705 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,705 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,705 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,705 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,705 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,705 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,705 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,705 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,705 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,705 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
2023-08-25 13:56:35,705 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,705 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1978 [org.apache.kafka.clients.consumer.KafkaConsumer]
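The long runs of paired "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." DEBUG lines (collapsed above to a single occurrence) come from the Oracle connector task repeatedly polling its in-memory change-event queue while nothing is buffered. The sketch below is a rough, simplified stand-in for that kind of bounded poll loop; it is not Debezium's actual ChangeEventQueue implementation, only an illustration of why the two messages alternate until a batch fills or the timeout expires.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Simplified change-event queue: poll() loops until either maxBatchSize events
// have been collected or the timeout expires, sleeping briefly (and logging)
// whenever the batch is not yet full.
public class SimpleEventQueue<T> {
    private final BlockingQueue<T> queue = new ArrayBlockingQueue<>(8192);
    private final int maxBatchSize;
    private final long pollIntervalMs;

    public SimpleEventQueue(int maxBatchSize, long pollIntervalMs) {
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    // Called by the producing side (the streaming source) to buffer an event.
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Called by the task's poll loop to assemble the next batch.
    public List<T> poll(long timeoutMs) throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        while (batch.size() < maxBatchSize && System.nanoTime() < deadline) {
            System.out.println("checking for more records...");
            queue.drainTo(batch, maxBatchSize - batch.size());
            if (batch.size() < maxBatchSize) {
                System.out.println("no records available or batch size not reached yet, sleeping a bit...");
                Thread.sleep(pollIntervalMs);
            }
        }
        return batch;
    }
}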
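Most of the TRACE volume that follows is the "Skipping fetch for partition ... because previous request to brokerN ... has not been processed" pattern, bracketed by "Adding pending request" / "Removing pending request" lines for each node. The toy model below captures the bookkeeping those lines describe: at most one in-flight fetch per broker, with every partition led by a busy broker skipped until that broker's previous response arrives. It is a simplified illustration under those assumptions, not the Kafka client's AbstractFetch code, and the class and method names are invented for the sketch.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Toy per-node in-flight gate modelling the "Adding pending request",
// "Removing pending request" and "Skipping fetch ..." log lines.
public class PendingFetchGate {
    private final Set<String> nodesWithPendingRequest = new HashSet<>();

    // partitionLeaders: partition -> leader node, e.g. "naument-status-0" -> "broker2:29092".
    // Returns the partitions grouped by node for which a fetch may be sent now.
    public Map<String, List<String>> selectPartitionsToFetch(Map<String, String> partitionLeaders) {
        Map<String, List<String>> toFetchByNode = new HashMap<>();
        partitionLeaders.forEach((partition, node) -> {
            if (nodesWithPendingRequest.contains(node)) {
                System.out.printf(
                    "Skipping fetch for partition %s because previous request to %s has not been processed%n",
                    partition, node);
            } else {
                toFetchByNode.computeIfAbsent(node, n -> new ArrayList<>()).add(partition);
            }
        });
        // One fetch request per node that has at least one ready partition.
        toFetchByNode.keySet().forEach(node -> {
            nodesWithPendingRequest.add(node);
            System.out.println("Adding pending request for node " + node);
        });
        return toFetchByNode;
    }

    // Invoked when the node's fetch response comes back, freeing it for the next request.
    public void onResponse(String node) {
        nodesWithPendingRequest.remove(node);
        System.out.println("Removing pending request for node " + node);
    }
}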
2023-08-25 13:56:35,706 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:35,706 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:35,706 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,706 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:35,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1927, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=638) for
node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413662 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1930, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=638, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413662 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1919, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for 
session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=637) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379485 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1922, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=637, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379485 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,801 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1928, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,801 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,801 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=640) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,801 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413575 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,801 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1931, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=640, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,801 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,801 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413575 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,804 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=648, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,804 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,804 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,804 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,804 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=642) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,804 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,804 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,804 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147412549 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,804 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=649, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=642, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,805 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,805 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147412548 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1929, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,826 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,826 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,826 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,826 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=641) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413550 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1932, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=641, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413549 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1920, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,855 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=637) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379397 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,856 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1923, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=637, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,856 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379396 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1921, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=637) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379390 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:35,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1924, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=637, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:35,862 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:35,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147379390 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:36,205 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=128, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:36,205 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:36,205 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:36,205 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
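
The sink task's consumer (group connect-vk_nau27_sink) is fetching vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27 partition 0 from position 2001 and keeps receiving empty responses, i.e. it appears to be caught up. A sketch of how to confirm that from outside the worker with the AdminClient, comparing the group's committed offset against the partition's log-end offset; the bootstrap address is an assumption, run it wherever the brokers are reachable:

    import java.util.Map;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.ListOffsetsResult;
    import org.apache.kafka.clients.admin.OffsetSpec;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;

    public class SinkLagCheck {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092"); // assumption
            TopicPartition tp = new TopicPartition("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27", 0);
            try (Admin admin = Admin.create(props)) {
                // Offset the sink connector's consumer group has committed for this partition.
                Map<TopicPartition, OffsetAndMetadata> committed =
                        admin.listConsumerGroupOffsets("connect-vk_nau27_sink")
                             .partitionsToOffsetAndMetadata().get();
                // Current log-end offset of the Debezium change topic partition.
                Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> end =
                        admin.listOffsets(Map.of(tp, OffsetSpec.latest())).all().get();
                long committedOffset = committed.containsKey(tp) ? committed.get(tp).offset() : -1L;
                long endOffset = end.get(tp).offset();
                System.out.printf("committed=%d end=%d lag=%d%n",
                        committedOffset, endOffset, endOffset - Math.max(committedOffset, 0L));
            }
        }
    }

If the reported lag stays at zero while the FETCH responses stay empty, the connector is simply idle rather than stuck.
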
2023-08-25 13:56:36,206 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=94) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:36,206 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:36,206 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:36,206 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1478 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:36,206 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=129, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=94, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
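
The "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." pair comes from Debezium's ChangeEventQueue: the oracle-connector-task's poll loop waits until either a full batch is buffered or the poll interval expires, and at DEBUG level it logs one pair per iteration. The options that drive this loop are ordinary Debezium connector settings; a sketch of the relevant subset, where the key names are standard Debezium properties and the values are illustrative defaults, not taken from this log:

    import java.util.Map;

    public class Nau27QueueTuningSketch {
        // Illustrative subset of a Debezium source connector configuration governing the poll loop.
        static final Map<String, String> QUEUE_TUNING = Map.of(
                "max.batch.size", "2048",    // records handed to Connect per poll() batch
                "max.queue.size", "8192",    // records buffered between the streaming thread and poll()
                "poll.interval.ms", "500"    // how long poll() sleeps when no full batch is available
        );
    }
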
2023-08-25 13:56:36,206 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:36,206 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1477 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,206 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,207 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,207 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:36,207 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:36,207 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:36,207 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
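The repeated "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." / "polling records..." entries above are the hand-off between Debezium's LogMiner streaming thread and the Connect source task through ChangeEventQueue. The sketch below is not Debezium's actual class, only the shape of that loop under assumed stand-ins for max.queue.size, max.batch.size and the sleep interval:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Illustrative stand-in for io.debezium.connector.base.ChangeEventQueue:
// a bounded buffer the streaming thread fills and the source task drains.
public class ChangeEventBuffer<T> {
    private final BlockingQueue<T> queue;
    private final int maxBatchSize;
    private final long sleepMs;

    public ChangeEventBuffer(int maxQueueSize, int maxBatchSize, long sleepMs) {
        this.queue = new LinkedBlockingQueue<>(maxQueueSize);
        this.maxBatchSize = maxBatchSize;
        this.sleepMs = sleepMs;
    }

    // Streaming (LogMiner) side: blocks when the queue is full.
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Task side: "polling records...", then drain until the batch is full or the
    // deadline passes, sleeping while no records are available / the batch is short.
    public List<T> poll(long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        List<T> batch = new ArrayList<>(maxBatchSize);
        while (batch.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
            queue.drainTo(batch, maxBatchSize - batch.size());
            if (batch.size() < maxBatchSize) {
                Thread.sleep(sleepMs); // back off briefly before checking again
            }
        }
        return batch; // may be empty -> the worker then has 0 records to send
    }
}
```

When the deadline passes with nothing buffered, the batch comes back empty, which is what produces the WorkerSourceTask "About to send 0 records to Kafka" / "Nothing to send to Kafka" lines above.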
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1930, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,213 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=639) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413163 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,214 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1933, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=639, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,214 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413162 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1922, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=638) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378984 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1925, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=638, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378984 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,297 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:36,299 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:36,299 TRACE 
Oracle|vk_nau27|streaming Current time 1692960996299 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:36,299 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:36,300 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 3000. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:36,300 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931598818 as end SCN. [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:36,300 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:36,302 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931598819' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:36,302 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1931, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,302 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,302 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 DEBUG || [Consumer 
clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=641) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,302 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413074 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,302 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1934, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=641, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,302 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,302 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413074 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,305 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=649, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,305 DEBUG || [Consumer 
clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,305 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,305 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,305 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=643) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,305 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,305 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,305 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147412048 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,305 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=650, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=643, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,305 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,305 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147412048 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,306 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:36,308 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931598799, endScn=289931598818, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:36,308 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931598800', endScn => '289931598818', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; 
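The streaming thread's SQL is spelled out in the entries above: SELECT SYSTIMESTAMP FROM DUAL, SELECT CURRENT_SCN FROM V$DATABASE, an UPDATE of Debezium's LOG_MINING_FLUSH bookkeeping table, and finally DBMS_LOGMNR.START_LOGMNR over the startScn/endScn window. A hedged JDBC sketch of the same sequence, useful when reproducing the mining-session setup by hand; the connection URL, credentials and SCN window below are placeholders, while the statements themselves are copied from the log:

```java
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class LogMinerSessionSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details; substitute the real host/service/user.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:oracle:thin:@//dbhost:1521/ORCLPDB", "c##dbzuser", "dbz");
             Statement st = conn.createStatement()) {

            // Same probes as in the log: database time and the current SCN.
            try (ResultSet rs = st.executeQuery("SELECT SYSTIMESTAMP FROM DUAL")) {
                rs.next();
                System.out.println("DB time: " + rs.getTimestamp(1));
            }
            long endScn;
            try (ResultSet rs = st.executeQuery("SELECT CURRENT_SCN FROM V$DATABASE")) {
                rs.next();
                endScn = rs.getLong(1);
            }

            // Debezium keeps a LOG_MINING_FLUSH row up to date to force redo flushes.
            st.executeUpdate("UPDATE LOG_MINING_FLUSH SET LAST_SCN = " + (endScn + 1));

            // Start the mining session over the SCN window, mirroring the logged PL/SQL.
            long startScn = endScn - 20; // placeholder window; Debezium tracks this itself
            try (CallableStatement cs = conn.prepareCall(
                    "BEGIN sys.dbms_logmnr.start_logmnr("
                    + "startScn => ?, endScn => ?, "
                    + "OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS"
                    + " + DBMS_LOGMNR.DDL_DICT_TRACKING"
                    + " + DBMS_LOGMNR.NO_ROWID_IN_STMT); END;")) {
                cs.setLong(1, startScn);
                cs.setLong(2, endScn);
                cs.execute();
            }
        }
    }
}
```

After START_LOGMNR succeeds, the connector's "Fetching results for SCN [startScn, endScn]" step corresponds to querying V$LOGMNR_CONTENTS for that window.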
[io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:36,317 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931598799, 289931598818] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:36,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1932, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=642) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413049 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1935, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=642, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147413048 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1923, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
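All of the FETCH requests in this stretch go out with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), i.e. the stock fetch settings of the internal naument--offsets / naument--statuses / naument--configs consumers. A sketch of a standalone consumer configured the same way, for reproducing the fetch behaviour against the offset topic outside of Connect; the bootstrap servers, group id and topic name are inferred/placeholder values:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetTopicReader {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-debug"); // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // The values visible in FetchRequestData above:
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);              // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                  // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);           // maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Topic name inferred from the naument-offset-N partitions in the log.
            consumer.subscribe(List.of("naument-offset"));
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<byte[], byte[]> r : records) {
                System.out.printf("%s-%d@%d key=%d bytes value=%d bytes%n",
                        r.topic(), r.partition(), r.offset(),
                        r.key() == null ? 0 : r.key().length,
                        r.value() == null ? 0 : r.value().length);
            }
        }
    }
}
```

The three fetch.* values above are the consumer defaults, which is why they appear unchanged in every request; read_uncommitted is what shows up as isolationLevel=0 on the wire.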
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=638) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378896 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,356 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1926, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=638, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,356 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,357 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378896 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1924, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=638) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378890 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,363 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1927, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=638, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,363 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378889 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,392 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:36,392 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598808, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:33Z, transactionId='02000d008ace2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b901.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:36,392 TRACE Oracle|vk_nau27|streaming Transaction 02000d008ace2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:36,392 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598811, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:33Z, transactionId='14001f00b3ac2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b907.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:36,392 TRACE Oracle|vk_nau27|streaming Transaction 14001f00b3ac2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:36,393 DEBUG Oracle|vk_nau27|streaming Counters{rows=5, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=2, rollbackCount=0, tableMetadataCount=0}. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:36,393 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 3392. Offset SCN: 289931598799, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931598811, txIds=[14001f00b3ac2b00]}}], Active Transactions: 0, Sleep: 3000 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:36,393 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:36,398 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:36,402 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:36,404 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:36,406 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:36,706 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=129, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,706 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,706 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,706 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,706 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=95) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,706 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,706 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,706 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 977 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,706 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=130, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=95, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,707 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,707 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 976 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,707 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,708 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:36,708 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:36,708 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,708 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:36,712 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:36,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:36,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=122, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=122, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:36,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1933, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, 
offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=640) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412662 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,714 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1936, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=640, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,714 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,715 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,715 TRACE || [Consumer clientId=naument--statuses, 
groupId=naument] Polling for fetches with timeout 2147412662 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1925, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,767 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,767 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
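
In the fetch traffic above, every outgoing FETCH carries maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED). Those appear to be simply the Java consumer defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level, so each internal consumer polls each broker roughly twice a second even when nothing is flowing. A minimal, self-contained sketch of a consumer configured with the same values (the bootstrap list reuses the broker names from the log; the group id and topic choice are purely illustrative assumptions, not the worker's own):

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class FetchDefaultsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-inspect"); // hypothetical group, not the worker's
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            // These match the fields visible in the FETCH requests above; they are the client defaults.
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");        // maxWaitMs=500
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");            // minBytes=1
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");     // maxBytes=52428800
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-status"));
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                System.out.println("fetched " + records.count() + " records");
            }
        }
    }

Nothing in this sketch is specific to Connect; it only shows which client settings produce the request fields seen in the log.
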
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 DEBUG 
|| [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=639) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,768 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378485 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,768 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1928, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=639, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,768 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378484 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,803 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1934, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,803 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,803 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=642) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,803 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412573 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,803 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1937, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=642, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,803 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,803 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412573 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,805 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=650, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,805 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,805 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,805 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,805 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=644) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,805 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,805 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,805 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147411548 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,805 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=651, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=644, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,805 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,805 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147411548 
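
The clientId=naument--offsets, naument--configs and naument--statuses consumers in this stretch appear to be the Connect worker's readers for its three internal storage topics, and the partitions named in the fetches (naument-offset-0..24, naument-status-0..4, naument-config-0) suggest the conventional layout of 25 offset, 5 status and 1 config partition under group.id=naument. A sketch of the worker properties that would declare such a layout (topic names and partition counts are inferred from the log; replication factors and the rest of the real worker file are unknown and omitted):

    # connect-distributed.properties (inferred sketch, not the deployment's actual file)
    group.id=naument
    config.storage.topic=naument-config
    offset.storage.topic=naument-offset
    offset.storage.partitions=25
    status.storage.topic=naument-status
    status.storage.partitions=5
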
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1935, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=643) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412549 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1938, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=643, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412549 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,857 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1926, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,857 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,857 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,857 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=639) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378395 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,858 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1929, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=639, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,858 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378394 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1927, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
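
The offsets consumer cycles through all 25 naument-offset partitions, whose leaders are spread across broker1, broker2 and broker3 (nodes 1-3), which is why its polling fans out into three separate incremental fetch sessions (sessionIds 1245341447, 266530269 and 1558804422 above). If the partition-to-leader layout needs to be confirmed independently of this log, a small AdminClient program can print it; a sketch assuming a 3.x kafka-clients dependency and the same bootstrap brokers (both assumptions):

    import java.util.Map;
    import java.util.Properties;
    import java.util.Set;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.TopicDescription;

    public class DescribeInternalTopics {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            try (Admin admin = Admin.create(props)) {
                // Describe the three Connect storage topics and print each partition's current leader.
                Map<String, TopicDescription> topics = admin
                        .describeTopics(Set.of("naument-offset", "naument-status", "naument-config"))
                        .allTopicNames()
                        .get();
                topics.forEach((name, desc) -> desc.partitions().forEach(p ->
                        System.out.printf("%s-%d leader=%s%n", name, p.partition(), p.leader())));
            }
        }
    }
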
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=639) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:36,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378389 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1930, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=639, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:36,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147378388 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:36,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:56:36,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:56:37,207 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=130, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,207 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,207 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,207 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,207 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental 
fetch (sessionId=1637453888, epoch=96) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,207 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,207 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,207 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 476 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,207 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=131, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=96, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,207 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,207 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 476 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] [… the same "checking for more records…" / "no records available or batch size not reached yet, sleeping a bit…" DEBUG pair from io.debezium.connector.base.ChangeEventQueue repeats verbatim many more times within 13:56:37,208 …] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,208 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,209 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,209 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:37,209 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:37,209 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,209 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,215 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1936, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,215 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,215 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
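A note on the fetch traffic in this excerpt: every FETCH request is sent with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), and the "Skipping fetch … because previous request … has not been processed" TRACE lines only mean the client already has one in-flight fetch per broker and will not pipeline another. For reference, the sketch below shows a stand-alone Java consumer configured with the equivalent client-side settings; it is illustrative only — the group id (naument-offset-inspector) and the subscription to the naument-offset topic are assumptions made for the example, not values recovered from this log or from the worker's actual configuration.

// Illustrative sketch only: a stand-alone consumer whose fetch settings mirror the
// FetchRequestData fields logged above (maxWaitMs=500, minBytes=1, maxBytes=52428800,
// isolationLevel=0 -> read_uncommitted). Group id and topic subscription are
// hypothetical placeholders.
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchSettingsExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-offset-inspector");         // hypothetical group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);       // maxWaitMs in the FETCH requests above
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);           // minBytes
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);    // maxBytes (50 MiB)
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-offset"));             // internal offsets topic seen in this log
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
            records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
        }
    }
}

If the TRACE/DEBUG chatter from org.apache.kafka.clients.consumer.internals.AbstractFetch and io.debezium.connector.base.ChangeEventQueue is not needed, it can usually be raised at runtime through Kafka Connect's dynamic log-level endpoint (KIP-495), e.g. PUT /admin/loggers/org.apache.kafka.clients.consumer.internals.AbstractFetch with body {"level": "INFO"}, assuming the worker version exposes /admin/loggers.
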
2023-08-25 13:56:37,215 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=641) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,215 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412161 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,215 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1939, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=641, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,215 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,215 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412161 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1928, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=640) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,268 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377984 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,269 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1931, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=640, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,269 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377983 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,304 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1937, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,304 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,304 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=643) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,304 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412072 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,304 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1940, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=643, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,304 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,304 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412072 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,306 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=651, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,306 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,306 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,306 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,306 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=645) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,306 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,306 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,306 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147411047 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,306 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=652, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=645, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,306 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,306 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147411047 
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1938, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=644) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412049 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1941, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=644, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147412049 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1929, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=640) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377894 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,358 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1932, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=640, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,358 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377894 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1930, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=640) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377888 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1933, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=640, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377888 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,684 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:37,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:37,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=132, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,684 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header 
RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=132, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,684 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:37,685 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,685 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,685 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,685 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,708 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=131, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,708 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,708 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,708 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,708 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=97) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,708 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,708 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,708 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2976 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,708 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=133, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=97, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,708 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,708 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2976 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,709 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,710 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:37,710 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:37,710 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,710 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:37,716 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1939, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,716 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,716 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position 
FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=642) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,716 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411660 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,716 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1942, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=642, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,716 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,716 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411660 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1931, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been 
processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=641) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377483 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,769 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1934, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=641, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,769 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377483 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,804 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1940, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,804 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,804 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=644) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,804 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411572 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,804 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1943, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=644, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,804 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,804 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411572 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,806 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=652, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,806 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,806 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=646) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,807 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147410547 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=653, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=646, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,807 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,807 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147410546 
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1941, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=645) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411548 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1944, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=645, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411548 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1932, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,859 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=641) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,860 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377393 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,860 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1935, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=641, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,860 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377392 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1933, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=641) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377387 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:37,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1936, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=641, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:37,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147377387 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,208 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=133, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,208 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,208 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,208 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,208 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=98) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,208 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,208 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,208 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2476 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,208 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=134, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=98, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,209 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,209 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2475 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,210 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,210 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,210 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,210 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,210 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,210 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,210 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,210 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,210 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,210 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[... the two ChangeEventQueue DEBUG messages above ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") repeat verbatim many more times, all timestamped 2023-08-25 13:56:38,210 ...]
2023-08-25 13:56:38,211 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:38,211 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:38,211 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:38,211 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:38,211 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
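The ChangeEventQueue lines above show the source task's idle loop: Debezium buffers change events in a bounded queue, and the task repeatedly checks whether a full batch or the poll interval has been reached, sleeping briefly when nothing is buffered, which is why WorkerSourceTask then reports 0 records to send. A minimal sketch of that batching pattern, assuming a hypothetical BlockingQueue-based buffer (illustration of the pattern only, not Debezium's actual ChangeEventQueue implementation):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    // Hypothetical batch poller: drains a bounded buffer until the batch fills up
    // or the poll interval expires, mirroring the "checking for more records..." /
    // "no records available or batch size not reached yet, sleeping a bit..." loop.
    public class BatchPoller<T> {
        private final BlockingQueue<T> queue = new LinkedBlockingQueue<>(8192);
        private final int maxBatchSize;
        private final long pollIntervalMs;

        public BatchPoller(int maxBatchSize, long pollIntervalMs) {
            this.maxBatchSize = maxBatchSize;
            this.pollIntervalMs = pollIntervalMs;
        }

        // Producer side: the streaming source enqueues change events here.
        public void enqueue(T record) throws InterruptedException {
            queue.put(record);
        }

        // Consumer side: called from the task's poll(); may return an empty batch,
        // in which case the worker would log "About to send 0 records to Kafka".
        public List<T> poll() throws InterruptedException {
            List<T> batch = new ArrayList<>(maxBatchSize);
            long deadline = System.currentTimeMillis() + pollIntervalMs;
            while (batch.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
                T record = queue.poll(50, TimeUnit.MILLISECONDS);
                if (record != null) {
                    batch.add(record);
                }
            }
            return batch;
        }
    }

The surrounding AbstractFetch, FetchSessionHandler and KafkaConsumer TRACE/DEBUG lines are emitted from inside KafkaConsumer.poll(): each call builds an incremental FETCH request per broker (the toSend/implied partition sets) and skips any partition whose previous request to that broker is still in flight. A minimal standalone consumer loop that drives the same machinery, assuming placeholder broker addresses and topic name taken from the log's naming (this is not how Kafka Connect wires its internal naument--offsets/--statuses/--configs consumers, just an illustration):

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class FetchLoopExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Placeholder addresses and group id modelled on the log; adjust for a real cluster.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
                while (true) {
                    // Each poll() maintains per-broker incremental fetch sessions and skips
                    // partitions whose previous request "has not been processed".
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
                }
            }
        }
    }
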
2023-08-25 13:56:38,216 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1942, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:38,216 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:38,216 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,216 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,216 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,216 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,216 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,216 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,216 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=643) for node 3.
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,216 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,216 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,216 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411160 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,216 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1945, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=643, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,217 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,217 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,217 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,217 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,217 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,217 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411159 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,269 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1934, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=642) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376982 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,270 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1937, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=642, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,270 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376982 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,305 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1943, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,305 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,305 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=645) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,305 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411071 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,305 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1946, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=645, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,305 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,305 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411071 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,307 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=653, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,307 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,307 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,307 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,307 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=647) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,307 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,307 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,307 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147410046 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,307 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=654, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=647, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,307 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,307 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147410046 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,328 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1944, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,328 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,328 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,328 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,328 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=646) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,328 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,328 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,328 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411048 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,328 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1947, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=646, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147411047 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1935, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=642) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376892 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,360 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1938, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=642, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,360 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,361 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376892 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1936, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=642) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376887 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1939, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=642, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376886 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,709 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=134, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,709 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,709 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,709 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,709 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=99) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,709 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,709 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,709 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1975 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,709 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=135, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=99, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,709 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,709 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1975 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:38,711 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:38,712 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:38,712 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:38,712 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:38,712 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:38,712 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
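The block above is the idle loop of the Debezium Oracle source task vk_nau27: the streaming thread fills an in-memory change-event queue, the Kafka Connect worker thread drains it via poll(), and while nothing has arrived and the batch threshold is not reached the task sleeps briefly and re-checks, which is why WorkerSourceTask periodically reports sending 0 records to Kafka. The sketch below illustrates that poll-with-timeout pattern in plain Java; the class name, constants and queue type are illustrative assumptions, not Debezium's actual ChangeEventQueue implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Minimal sketch of the idle-poll pattern visible in the log above: a source task
// drains a bounded in-memory queue and, when nothing has arrived and the batch
// threshold is not reached, sleeps briefly and checks again. Names are illustrative.
public class PollLoopSketch {

    static final int MAX_BATCH_SIZE = 2048;   // assumption: mirrors the connector's max.batch.size
    static final long POLL_INTERVAL_MS = 500; // assumption: mirrors the connector's poll.interval.ms

    private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(8192);

    // Called by the worker thread; may legitimately return an empty batch.
    public List<String> poll() throws InterruptedException {
        List<String> batch = new ArrayList<>();
        long deadline = System.currentTimeMillis() + POLL_INTERVAL_MS;
        while (batch.size() < MAX_BATCH_SIZE && System.currentTimeMillis() < deadline) {
            // "checking for more records..."
            String record = queue.poll(50, TimeUnit.MILLISECONDS);
            if (record == null) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                continue;
            }
            batch.add(record);
        }
        // An empty batch is what leads to "About to send 0 records to Kafka" in the worker.
        return batch;
    }

    public static void main(String[] args) throws InterruptedException {
        PollLoopSketch sketch = new PollLoopSketch();
        System.out.println("polled " + sketch.poll().size() + " records");
    }
}

In the real connector, max.batch.size, max.queue.size and poll.interval.ms govern roughly how often these DEBUG messages appear when the source table is quiet.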
2023-08-25 13:56:38,717 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1945, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:38,717 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:38,717 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,717 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,717 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,717 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,717 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,717 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:38,717 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=644) for node 3.
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,717 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,717 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,717 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410659 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,717 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1948, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=644, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,717 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,717 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,717 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,717 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,717 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,717 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410659 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1937, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=643) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,770 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376482 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,770 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1940, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=643, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,771 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376481 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,805 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1946, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,805 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,805 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=646) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,805 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410571 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,805 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1949, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=646, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,805 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,805 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410571 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=654, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=648) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,807 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147409546 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,807 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=655, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=648, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,807 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,807 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147409546 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1947, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=647) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410547 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1950, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=647, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410547 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1938, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,861 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=643) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376391 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,862 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1941, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=643, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,862 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376390 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1939, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=643) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376386 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:38,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1942, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=643, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:38,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147376385 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,209 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=135, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,210 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,210 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,210 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,210 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=100) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,210 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,210 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,210 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1474 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,210 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=136, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=100, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,210 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,210 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1474 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,212 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,213 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:39,213 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:39,213 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,213 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
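The ChangeEventQueue lines above come from Debezium's internal batching queue: the source task keeps checking the queue, and when neither a full batch nor any record is available it sleeps briefly and checks again, while WorkerSourceTask's poll() returns 0 records to send. Below is a minimal, self-contained sketch of that batch-or-sleep pattern; the class BatchingQueueSketch and its parameters are illustrative and are not Debezium's actual io.debezium.connector.base.ChangeEventQueue API.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    // Illustrative stand-in for a change-event queue that hands out records
    // in batches, sleeping briefly when nothing has arrived yet.
    public class BatchingQueueSketch<T> {
        private final BlockingQueue<T> queue;
        private final int maxBatchSize;
        private final long pollSleepMs;

        public BatchingQueueSketch(int capacity, int maxBatchSize, long pollSleepMs) {
            this.queue = new ArrayBlockingQueue<>(capacity);
            this.maxBatchSize = maxBatchSize;
            this.pollSleepMs = pollSleepMs;
        }

        // Called by the producing side (e.g. the connector's mining thread).
        public void enqueue(T record) throws InterruptedException {
            queue.put(record);
        }

        // Called by the source task's poll(): returns a possibly empty batch.
        public List<T> poll() throws InterruptedException {
            List<T> batch = new ArrayList<>(maxBatchSize);
            // "checking for more records..."
            queue.drainTo(batch, maxBatchSize);
            if (batch.isEmpty()) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                Thread.sleep(pollSleepMs);
                queue.drainTo(batch, maxBatchSize);
            }
            return batch; // may be empty, mirroring "About to send 0 records to Kafka"
        }
    }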
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,217 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1948, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,218 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,218 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=645) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,218 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410158 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,218 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1951, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=645, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,218 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,218 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410158 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1940, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=644) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375981 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,271 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1943, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=644, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,271 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375981 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,306 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1949, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
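The FETCH requests in this stretch of the log all carry maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0 (READ_UNCOMMITTED) and a 30000 ms request timeout, and each consumer keeps only one in-flight fetch per broker, which is why partitions led by a busy broker are repeatedly "skipped ... because previous request ... has not been processed". The following is a minimal sketch of a plain consumer configured with those same values against the naument-status topic; the class name is hypothetical, and this is not the implementation of Connect's internal status store.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class StatusConsumerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
            props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--statuses");
            // Values matching the FetchRequestData fields seen in the log.
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);            // maxWaitMs=500
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                // minBytes=1
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);         // maxBytes=52428800
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0
            props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 30000);         // "timeout 30000"
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-status"));
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                System.out.println("fetched " + records.count() + " status records");
            }
        }
    }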
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,306 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,306 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=647) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,306 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410070 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,306 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1952, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=647, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,306 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,306 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410070 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,308 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=655, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,308 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,308 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,308 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,308 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=649) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,308 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,308 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,308 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147409045 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,308 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=656, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=649, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,308 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,308 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147409045 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1950, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
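The naument--configs consumer above follows a single partition, naument-config-0, and is already positioned at offset 2368; the worker keeps polling this compacted topic so it always sees the latest connector configuration. Below is a sketch of reading such a single-partition topic from the beginning up to its current end offset; the class name is hypothetical and this is only an illustration, not Connect's KafkaConfigBackingStore.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class ConfigTopicReaderSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--configs");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);

            TopicPartition tp = new TopicPartition("naument-config", 0); // naument-config-0
            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.assign(List.of(tp));
                consumer.seekToBeginning(List.of(tp));
                long end = consumer.endOffsets(List.of(tp)).get(tp); // e.g. 2368 in this log
                while (consumer.position(tp) < end) {
                    for (ConsumerRecord<byte[], byte[]> rec : consumer.poll(Duration.ofMillis(500))) {
                        // Each record is one config update, keyed by connector or task name.
                        System.out.printf("offset=%d keyBytes=%d valueBytes=%d%n",
                                rec.offset(), rec.key() == null ? 0 : rec.key().length,
                                rec.value() == null ? 0 : rec.value().length);
                    }
                }
            }
        }
    }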
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=648) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410046 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1953, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=648, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147410046 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1941, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=644) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375890 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,362 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1944, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=644, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
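The naument--offsets consumer tracks all 25 partitions of the offset topic (naument-offset-0 through naument-offset-24); in this log most of them are still at offset 0, while a few (for example naument-offset-13 at 25 and naument-offset-18 at 122) actually hold source offsets. The sketch below simply lists the end offset of every partition of that topic so you can see at a glance which partitions are populated; the class name is hypothetical and this is not part of Connect itself.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.PartitionInfo;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class OffsetTopicEndOffsetsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                List<TopicPartition> partitions = new ArrayList<>();
                for (PartitionInfo p : consumer.partitionsFor("naument-offset")) {
                    partitions.add(new TopicPartition(p.topic(), p.partition()));
                }
                // End offset per partition; 0 means no source offsets were ever written there.
                Map<TopicPartition, Long> ends = consumer.endOffsets(partitions);
                ends.forEach((tp, end) -> System.out.println(tp + " end offset " + end));
            }
        }
    }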
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,362 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375890 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1942, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=644) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375885 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1945, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=644, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375885 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,406 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:39,407 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:39,407 TRACE Oracle|vk_nau27|streaming Current time 1692960999407 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:39,408 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:39,409 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 2800. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:39,409 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931598888 as end SCN. 
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:39,409 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:39,410 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931598889' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:39,414 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:39,416 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931598811, endScn=289931598888, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:39,416 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931598812', endScn => '289931598888', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:39,426 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931598811, 289931598888] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598820, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='0900150001f42f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b90c.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Transaction 0900150001f42f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598823, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='030018002eb72800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b912.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Transaction 030018002eb72800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598830, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='11000f00592e2b00', operation='COMMIT', userName='SYS', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b916.0198, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Transaction 11000f00592e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Skipping 
change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598832) LogMinerEventRow{scn=289931598834, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='0a001b00951e3100', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b92c.0184, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Transaction 0a001b00951e3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598837, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='080004002f5b2e00', operation='COMMIT', userName='SYS', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b92e.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Transaction 080004002f5b2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
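The "does not match filters" messages mean the mined rows touch tables (the QRTZ_* and TBL_SYS_TASK_DATE tables under NAUMENT1.NAUSD40) that fall outside the connector's table include/exclude filters, so they are dropped before any change event is emitted. If the filter ever needs adjusting, the Connect REST interface seen elsewhere in this log (GET/PUT on /connectors/<name>/config) is the place to do it. The sketch below is illustrative only: the host and port are placeholders, treating DEBEZIUM.GBC_TBL_SERVICECALL_NC27 as the captured table is an assumption, and a real PUT must carry the connector's complete configuration, not just the filter key.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Illustrative PUT of a Debezium Oracle connector config via the Kafka Connect REST API.
// The body shown is partial on purpose; the endpoint replaces the whole config, so all
// existing keys must be included alongside table.include.list in a real call.
public class UpdateTableFilter {
    public static void main(String[] args) throws Exception {
        String config = "{"
                + "\"connector.class\": \"io.debezium.connector.oracle.OracleConnector\","
                + "\"table.include.list\": \"DEBEZIUM.GBC_TBL_SERVICECALL_NC27\""
                + "}";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://connect-host:8083/connectors/vk_nau27_src/config"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString(config))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}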
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598839) LogMinerEventRow{scn=289931598840, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='07000b00e7cf2d00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b93f.00c4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,563 TRACE Oracle|vk_nau27|streaming Transaction 07000b00e7cf2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598843, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='0500110051602b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b940.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 0500110051602b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598846, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='0400180067cd2a00', operation='COMMIT', userName='SYS', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b941.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 0400180067cd2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598849, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='12001100e34c2a00', operation='COMMIT', userName='SYS', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b944.00c8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 12001100e34c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598852, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='13000f00bdbe2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b954.0054, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 13000f00bdbe2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598854) LogMinerEventRow{scn=289931598855, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='0c0006004e552b00', operation='COMMIT', userName='SYS', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b967.0054, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 0c0006004e552b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598854) LogMinerEventRow{scn=289931598858, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='110006007e2d2b00', operation='COMMIT', userName='SYS', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b96d.0080, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 110006007e2d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598861, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='02001a00b6d02700', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b978.0078, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 02001a00b6d02700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598863) LogMinerEventRow{scn=289931598864, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:36Z, transactionId='14001b0023af2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b988.0110, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 14001b0023af2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598868, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:37Z, transactionId='0f000b0040e82900', operation='COMMIT', userName='SYS', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b989.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 0f000b0040e82900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598871, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:37Z, transactionId='0e001b00e8462900', operation='COMMIT', userName='SYS', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b98a.0168, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 0e001b00e8462900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598874, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:37Z, transactionId='0b000a00380c2f00', operation='COMMIT', userName='SYS', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b98d.00c8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 0b000a00380c2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598877, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:37Z, transactionId='0700150055c32d00', operation='COMMIT', userName='SYS', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b9a3.001c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming Transaction 0700150055c32d00 has 0 events 
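The mining pass above is driven by the PL/SQL call logged a few lines earlier (sys.dbms_logmnr.start_logmnr with DICT_FROM_REDO_LOGS, DDL_DICT_TRACKING and NO_ROWID_IN_STMT) over the SCN window 289931598812..289931598888. To reproduce such a session by hand, for example when checking why a window yields only commits, the same call can be issued over JDBC. The connection URL and credentials below are placeholders, the session needs the usual LogMiner privileges, and the SCN bounds are the ones from the log.

import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Manual LogMiner session mirroring the statement in the log above.
// Requires the Oracle JDBC driver on the classpath; URL/user/password are placeholders.
public class LogMinerWindowCheck {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:oracle:thin:@//oracle-host:1521/NAUMENT1", "DEBEZIUM", "***")) {

            String startLogMnr =
                "BEGIN sys.dbms_logmnr.start_logmnr("
              + " startScn => ?, endScn => ?,"
              + " OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS"
              + "          + DBMS_LOGMNR.DDL_DICT_TRACKING"
              + "          + DBMS_LOGMNR.NO_ROWID_IN_STMT); END;";
            try (CallableStatement cs = conn.prepareCall(startLogMnr)) {
                // The connector passes the SCN bounds as quoted strings, as seen in the log.
                cs.setString(1, "289931598812");
                cs.setString(2, "289931598888");
                cs.execute();
            }

            // Mined rows are read back through V$LOGMNR_CONTENTS; a window producing only
            // COMMIT operations matches the "Transaction ... has 0 events" lines above.
            try (Statement st = conn.createStatement();
                 ResultSet rs = st.executeQuery(
                     "SELECT scn, operation, seg_owner, table_name FROM v$logmnr_contents")) {
                while (rs.next()) {
                    System.out.printf("%d %s %s.%s%n",
                        rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4));
                }
            }

            try (CallableStatement end = conn.prepareCall("BEGIN sys.dbms_logmnr.end_logmnr; END;")) {
                end.execute();
            }
        }
    }
}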
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 DEBUG Oracle|vk_nau27|streaming Counters{rows=51, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=18, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 2564. Offset SCN: 289931598811, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931598877, txIds=[0700150055c32d00]}}], Active Transactions: 0, Sleep: 2800 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:39,564 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:39,569 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:39,573 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:39,576 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:39,579 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:39,710 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=136, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,710 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,710 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,710 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,710 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=101) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,710 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,710 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,710 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 974 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,710 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=137, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=101, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,711 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,711 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 973 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,712 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:39,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:39,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=123, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=123, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
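For context on the sink-side fetches above: connector-consumer-vk_nau27_sink-0 is fetching vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 from position 2001 and the broker keeps returning empty incremental responses, i.e. the sink is caught up at that moment. One way to confirm what the group has committed is the Kafka Admin API; the sketch below assumes only the broker list and the group name connect-vk_nau27_sink from the log.

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;
import java.util.Properties;

// Prints the offsets committed by the sink connector's consumer group.
public class SinkGroupOffsets {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        try (Admin admin = Admin.create(props)) {
            Map<TopicPartition, OffsetAndMetadata> committed =
                admin.listConsumerGroupOffsets("connect-vk_nau27_sink")
                     .partitionsToOffsetAndMetadata()
                     .get();
            committed.forEach((tp, om) -> System.out.printf("%s -> %d%n", tp, om.offset()));
        }
    }
}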
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,713 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,714 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:39,714 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:39,714 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,714 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
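The repeated "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." pair above is io.debezium.connector.base.ChangeEventQueue polling an idle source at DEBUG level, and the consumer chatter that follows comes from the org.apache.kafka.clients.consumer.internals loggers at DEBUG/TRACE. If the goal is a readable log rather than a full trace, Kafka Connect's dynamic log-level endpoint (PUT /admin/loggers/<logger>, served on the worker's REST listener since Kafka 2.4) can raise these loggers back to INFO at runtime. The sketch below is illustrative only and assumes the worker REST API is reachable at http://localhost:8083; that address is not taken from this log.

# Minimal sketch: raise the level of the chatty loggers seen in this capture.
# Assumes a reachable Connect worker at WORKER (hypothetical address, adjust as needed).
import json
import urllib.request

WORKER = "http://localhost:8083"  # assumption: the worker's REST listener
LOGGERS = [
    "io.debezium.connector.base.ChangeEventQueue",
    "org.apache.kafka.clients.consumer.internals.AbstractFetch",
]

for name in LOGGERS:
    req = urllib.request.Request(
        url=f"{WORKER}/admin/loggers/{name}",
        data=json.dumps({"level": "INFO"}).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="PUT",
    )
    with urllib.request.urlopen(req) as resp:
        # The endpoint answers with the logger names whose level was changed.
        print(name, resp.status, resp.read().decode("utf-8"))

Levels set this way are not persisted; they revert to the log4j configuration when the worker restarts.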
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:39,718 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1951, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,718 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,718 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,718 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,718 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,718 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,718 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,718 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,718 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=646) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,718 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,718 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,718 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409658 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,718 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1954, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=646, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,719 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,719 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,719 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,719 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,719 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,719 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409657 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,771 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1943, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=645) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375480 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,772 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1946, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=645, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,772 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375480 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,806 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1952, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,806 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,806 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=648) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,806 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409570 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,806 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1955, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=648, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,806 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,806 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409570 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,808 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=656, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,808 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,808 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,808 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,808 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=650) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,809 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147408545 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=657, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=650, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,809 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,809 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147408544 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,830 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1953, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=649) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409546 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1956, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=649, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409545 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1944, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,863 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=645) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375389 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1947, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=645, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375388 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1945, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=645) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375384 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:39,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1948, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=645, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:39,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147375384 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,211 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=137, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,211 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,211 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,211 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,211 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=102) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,211 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,211 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,211 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 473 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,211 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=138, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=102, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,211 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,211 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 473 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,214 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,214 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,214 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,214 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,214 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,214 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,214 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,214 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,215 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:40,215 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:40,215 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,215 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,219 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1954, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,219 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,219 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=647) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,219 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409157 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,219 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1957, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=647, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,219 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,219 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409157 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,272 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1946, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,272 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,272 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,272 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=646) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,273 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374980 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,273 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1949, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=646, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,273 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374979 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,307 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1955, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,307 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,307 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=649) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,307 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409069 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,307 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1958, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=649, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,307 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,307 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409069 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,309 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=657, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,309 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,309 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,309 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,309 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=651) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,309 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,309 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,309 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147408044 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,309 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=658, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=651, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,309 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,309 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147408044 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1956, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=650) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409045 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1959, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=650, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147409045 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1947, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=646) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374888 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,364 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1950, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=646, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,364 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374888 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1948, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=646) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374884 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1951, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=646, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374883 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,685 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:40,685 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:40,685 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=139, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,685 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,685 
TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,685 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=139, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,685 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:40,685 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,685 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,685 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,685 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,712 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=138, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,712 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,712 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,712 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,712 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=103) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,712 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,712 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,712 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2973 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,712 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=140, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=103, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,712 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,712 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2973 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,715 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,716 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:40,716 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:40,716 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,716 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:40,720 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1957, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,720 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,720 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=648) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,720 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408656 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,720 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1960, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=648, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,720 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,720 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408656 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1949, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=647) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374479 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,773 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1952, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=647, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,773 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374479 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,807 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1958, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,807 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,807 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,807 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,807 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,807 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,807 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,807 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,807 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=650) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,807 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,807 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,807 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408569 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,807 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1961, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=650, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408569 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=658, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=652) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,809 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147407544 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,809 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=659, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=652, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,809 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,809 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147407544 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1959, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=651) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408545 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1962, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=651, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408545 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1950, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,864 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,864 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=647) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374388 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1953, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=647, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374387 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1951, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=647) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,869 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374383 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:40,869 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1954, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=647, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:40,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147374382 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,212 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=140, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,212 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,212 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,212 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,212 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=104) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,212 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,212 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,212 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2473 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,212 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=141, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=104, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,213 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,213 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2472 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[... the DEBUG pair "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." from io.debezium.connector.base.ChangeEventQueue repeats verbatim several dozen more times, all timestamped 2023-08-25 13:56:41,216; duplicates omitted ...]
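The condensed DEBUG pair above is the Debezium source task idling: the Connect worker keeps polling the connector, but the internal change-event queue is empty and the batch threshold has not been reached, so the task sleeps briefly and checks again. How often this loop runs and how large a batch it waits for are governed by the connector's poll.interval.ms, max.batch.size and max.queue.size settings, and the noisy TRACE/DEBUG loggers seen throughout this capture can be turned down at runtime through the Connect REST API. A minimal sketch, assuming a Connect worker reachable at http://localhost:8083 and the vk_nau27_src connector named in the WorkerSourceTask entries of this log; the URL, the chosen values and the helper names are illustrative assumptions, not taken from the log:

import requests

CONNECT_URL = "http://localhost:8083"   # assumption: replace with the actual Connect worker address
CONNECTOR = "vk_nau27_src"              # source connector named in the WorkerSourceTask entries above

def set_logger_level(logger: str, level: str) -> None:
    # Runtime log-level change via the Connect admin endpoint: PUT /admin/loggers/{logger}
    resp = requests.put(f"{CONNECT_URL}/admin/loggers/{logger}", json={"level": level}, timeout=10)
    resp.raise_for_status()

def tune_queue(max_batch_size: int, max_queue_size: int, poll_interval_ms: int) -> None:
    # Read the current connector config, merge the queue/batch settings that drive the
    # ChangeEventQueue poll loop, and PUT the full map back (PUT replaces the whole config).
    cfg = requests.get(f"{CONNECT_URL}/connectors/{CONNECTOR}/config", timeout=10).json()
    cfg.update({
        "max.batch.size": str(max_batch_size),
        "max.queue.size": str(max_queue_size),
        "poll.interval.ms": str(poll_interval_ms),
    })
    resp = requests.put(f"{CONNECT_URL}/connectors/{CONNECTOR}/config", json=cfg, timeout=10)
    resp.raise_for_status()

if __name__ == "__main__":
    # Quiet the two loggers responsible for most of the lines in this capture.
    set_logger_level("io.debezium.connector.base.ChangeEventQueue", "INFO")
    set_logger_level("org.apache.kafka.clients.consumer.internals.AbstractFetch", "INFO")
    # Illustrative values only; check the Debezium Oracle connector docs for the defaults.
    tune_queue(max_batch_size=2048, max_queue_size=8192, poll_interval_ms=500)

With those loggers back at INFO, the fetch-skip and queue-poll chatter should drop out of the log, leaving the connector lifecycle and REST entries that are more useful for troubleshooting. The raw capture resumes below.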
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,216 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,217 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,217 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:41,217 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:41,217 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,217 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,220 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1960, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,220 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,220 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,220 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,220 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,220 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,220 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position 
FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,220 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,220 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=649) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,220 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,220 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,220 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408156 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,220 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1963, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=649, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,221 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,221 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,221 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,221 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,221 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,221 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408155 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,273 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1952, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been 
processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=648) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373978 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,274 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1955, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=648, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,274 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373978 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,308 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1961, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,308 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,308 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=651) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,308 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408068 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,308 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1964, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=651, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,308 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,308 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408068 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=659, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=653) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,310 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147407043 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=660, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=653, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,310 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,310 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147407043 
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1962, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=652) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408045 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1965, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=652, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147408044 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1953, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=648) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373887 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,365 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1956, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=648, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,365 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373887 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1954, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=648) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373881 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1957, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=648, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373881 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,713 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=141, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,713 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,713 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,713 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,713 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=105) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,713 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,713 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,713 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1972 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,713 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=142, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=105, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,713 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,713 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1972 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
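The pair of ChangeEventQueue messages repeated at 13:56:41,717 is the Debezium source task's idle poll loop: the in-memory queue is checked, nothing has been buffered by the Oracle connector yet, and the task sleeps briefly before checking again. If this DEBUG chatter is not wanted in the worker log, one way to quiet it (assuming the stock log4j setup that Kafka Connect ships with, connect-log4j.properties) is to raise the level of the noisiest loggers seen here, for example:

    log4j.logger.io.debezium.connector.base.ChangeEventQueue=INFO
    log4j.logger.org.apache.kafka.clients.consumer.internals.AbstractFetch=INFO
    log4j.logger.org.apache.kafka.clients.FetchSessionHandler=INFO
    log4j.logger.org.apache.kafka.clients.NetworkClient=INFO

The same adjustment can usually be made at runtime through the Connect REST admin/loggers endpoint, so a worker restart is not required.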
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,717 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,718 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,718 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:41,718 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:41,718 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,718 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:41,721 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1963, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,721 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,721 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch 
for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=650) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,721 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407655 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,721 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1966, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=650, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,721 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,721 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407655 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, 
correlationId=1955, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=649) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,774 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373478 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,774 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1958, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=649, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373478 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,808 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1964, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,808 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,808 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=652) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,808 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407568 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,808 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1967, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=652, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,808 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407568 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,810 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=660, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,810 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,810 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,810 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,810 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=654) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,810 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,810 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,810 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147406543 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,810 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=661, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=654, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,811 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,811 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147406543 
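The FetchRequestData fields in the FETCH requests above map directly onto standard consumer settings: maxWaitMs=500 is fetch.max.wait.ms, minBytes=1 is fetch.min.bytes, maxBytes=52428800 is fetch.max.bytes, and isolationLevel=0 is read_uncommitted. As a rough illustration only (not the worker's internal consumer; the group id and subscription below are placeholders, and the broker list simply reuses the addresses that appear in this log), a plain Java consumer issuing the same kind of fetch could be configured like this:

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class FetchSettingsExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Broker addresses as they appear in this log; adjust for your environment.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "fetch-settings-example");      // placeholder
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");                // maxWaitMs=500
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                    // minBytes=1
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");             // maxBytes=52428800
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");     // isolationLevel=0

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Collections.singletonList("naument-status"));      // any topic from the log
                // One poll; with these settings the broker waits up to 500 ms for at least 1 byte.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1000));
                System.out.printf("fetched %d records%n", records.count());
            }
        }
    }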
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1965, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,833 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,833 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=653) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,833 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,833 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,833 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407544 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,833 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1968, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=653, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,833 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,833 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,833 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,833 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,833 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,833 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407543 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1956, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,865 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,865 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=649) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373387 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,866 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1959, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=649, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,866 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373386 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,871 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1957, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=649) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373380 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1960, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=649, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:41,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147373380 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:41,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:56:41,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:56:42,213 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=142, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,213 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,213 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,213 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,213 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental 
fetch (sessionId=1637453888, epoch=106) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,213 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,213 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,213 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1472 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,213 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=143, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=106, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,214 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,214 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1471 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue]
[... the DEBUG pair "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." from io.debezium.connector.base.ChangeEventQueue (Oracle|vk_nau27|oracle-connector-task) repeats verbatim many more times, all stamped 2023-08-25 13:56:42,218; the duplicate entries are omitted ...]
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,218 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,219 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,219 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:42,219 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:42,219 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,219 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,221 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1966, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,221 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,221 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,221 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,221 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,221 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,221 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,221 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,222 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=651) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,222 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,222 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,222 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407155 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,222 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1969, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=651, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,222 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,222 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,222 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,222 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,222 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,222 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407154 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1958, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=650) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372977 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,275 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1961, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=650, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,275 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372977 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,309 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1967, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,309 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,309 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=653) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,309 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407067 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,309 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1970, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=653, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,309 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,309 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407067 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=661, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=655) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,310 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147406043 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,310 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=662, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=655, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,310 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,310 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147406043 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1968, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=654) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407043 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1971, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=654, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147407043 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1959, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=650) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372886 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,366 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1962, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=650, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,366 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372886 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1960, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,372 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=650) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372880 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1963, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=650, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372879 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,379 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:42,381 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:42,381 TRACE Oracle|vk_nau27|streaming Current time 1692961002381 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:42,381 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:42,382 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 3000. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:42,382 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931598949 as end SCN. 
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:42,382 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:42,383 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931598950' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:42,388 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:42,390 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931598877, endScn=289931598949, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:42,390 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931598878', endScn => '289931598949', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:42,400 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931598877, 289931598949] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598890, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='0a000600e9213100', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b9a8.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 0a000600e9213100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598894, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='09001e0037f02f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b9af.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 09001e0037f02f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598897, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='0400160046ce2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b9b3.0154, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 0400160046ce2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit 
(smallest SCN null) LogMinerEventRow{scn=289931598900, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='0c00040066572b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b9b7.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 0c00040066572b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598903, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='0d001900c9dd2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b9d4.00bc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 0d001900c9dd2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598906, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='1000150064302a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b9d8.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 1000150064302a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598909, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='11000200912d2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b9dc.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 11000200912d2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598912, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='050005003f702b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002b9e0.01b0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 050005003f702b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598915, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='020004006cd12700', operation='COMMIT', 
userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba16.0148, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 020004006cd12700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598918, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='0300090024b62800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba1a.012c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 0300090024b62800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598921, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='1400090056b22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba1e.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 1400090056b22b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598924, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='06002000100a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba23.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 06002000100a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598927, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='07000f004cc72d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba27.0128, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 07000f004cc72d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598930, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='080016009e622e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba3a.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE 
Oracle|vk_nau27|streaming Transaction 080016009e622e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598933, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='0c0013002a5b2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba3d.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 0c0013002a5b2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598936, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='110005008b2e2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba41.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 110005008b2e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598939, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='0f00150085ee2900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba68.002c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 0f00150085ee2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598942, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:39Z, transactionId='01000600a4bd2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba6b.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 TRACE Oracle|vk_nau27|streaming Transaction 01000600a4bd2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,485 DEBUG Oracle|vk_nau27|streaming Counters{rows=37, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=18, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,486 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 3485. 
Offset SCN: 289931598877, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931598942, txIds=[01000600a4bd2500]}}], Active Transactions: 0, Sleep: 3000 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:42,486 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:42,488 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:42,490 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:42,492 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:42,494 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:42,712 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:42,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:42,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=124, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=124, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:42,714 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=143, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,714 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,714 DEBUG 
|| [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,714 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,714 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=107) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,714 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,714 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,714 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 971 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,714 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=144, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=107, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,714 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,714 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 971 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
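The repeated "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." pairs above are the Debezium change event queue idling: the streaming thread has buffered nothing, so every poll by the connector task finds an empty queue and sleeps briefly before trying again. Below is a minimal sketch of that idle-poll pattern, a bounded queue drained in batches with a short sleep when nothing is available; the class, field, and parameter names are illustrative and do not reproduce Debezium's actual ChangeEventQueue API.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Illustrative bounded change-event queue: the task thread drains it in batches
// and sleeps whenever nothing is buffered, mirroring the log messages above.
public class SimpleChangeEventQueue<T> {
    private final LinkedBlockingQueue<T> queue;
    private final int maxBatchSize;
    private final long pollIntervalMs;

    public SimpleChangeEventQueue(int capacity, int maxBatchSize, long pollIntervalMs) {
        this.queue = new LinkedBlockingQueue<>(capacity);
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    // Streaming thread hands over a captured change event; blocks when full (back-pressure).
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Task thread: keeps "checking for more records" until a batch is available,
    // otherwise sleeps a bit, exactly the cycle logged above.
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        while (batch.isEmpty()) {
            queue.drainTo(batch, maxBatchSize);              // checking for more records...
            if (batch.isEmpty()) {
                TimeUnit.MILLISECONDS.sleep(pollIntervalMs); // no records available, sleeping a bit...
            }
        }
        return batch;
    }

    public static void main(String[] args) throws InterruptedException {
        SimpleChangeEventQueue<String> q = new SimpleChangeEventQueue<>(1024, 16, 50);
        new Thread(() -> {
            try {
                Thread.sleep(200);            // simulate a quiet source for a moment
                q.enqueue("change-event-1");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }).start();
        System.out.println("polled batch: " + q.poll());    // idles, then returns the event
    }
}

The same back-and-forth shows up one level higher in the worker: when the task's poll returns nothing, AbstractWorkerSourceTask logs "About to send 0 records to Kafka" and "Nothing to send to Kafka. Polling source for additional records" and simply polls again.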
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,719 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,720 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,720 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:42,720 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:42,720 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,720 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:42,722 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1969, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,722 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,722 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position 
FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=652) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,722 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406654 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,722 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1972, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=652, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,722 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,722 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406654 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,775 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1961, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,775 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,775 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,775 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,775 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been 
processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,775 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:56:42,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=651) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372477 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1964, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=651, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372476 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,810 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1970, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,810 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,810 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=654) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,810 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406566 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,810 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1973, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=654, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,810 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,810 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406566 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,811 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=662, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,811 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,811 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,811 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,811 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=656) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,811 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,811 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,811 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147405542 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,811 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=663, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=656, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,811 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,811 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147405542 
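The naument--offsets, naument--statuses, and naument--configs consumers in the surrounding entries appear to be the worker's internal readers of its offset, status, and config topics; each of their poll() calls is what produces the incremental FETCH request/response pairs and the "Skipping fetch ... has not been processed" lines. A minimal standalone sketch of such a poll loop against the status topic follows; the broker list and topic name are taken from the log, while the group id, client id, and the printing are illustrative and not the worker's actual configuration.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Standalone poll loop over the status topic; every consumer.poll() drives the
// (incremental) FETCH requests to the partition leaders seen in the log.
public class StatusTopicReader {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list and topic name come from the log; ids below are illustrative.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-status-reader");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument-status-reader-1");
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // matches the READ_UNCOMMITTED fetches
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("naument-status"));
            while (true) {
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    System.out.printf("partition=%d offset=%d value=%d bytes%n",
                            record.partition(), record.offset(),
                            record.value() == null ? 0 : record.value().length);
                }
            }
        }
    }
}

When a partition's leader already has an outstanding request, the client skips it for that round, which is why the "Skipping fetch for partition ... has not been processed" TRACE lines repeat for every partition whose broker is still busy.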
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1971, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=655) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406542 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1974, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=655, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406542 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1962, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=651) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372385 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,867 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1965, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=651, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,867 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372385 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1963, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=651) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372379 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:42,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1966, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=651, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:42,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147372379 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,215 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=144, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,215 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,215 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,215 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,215 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=108) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,215 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,215 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,215 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 470 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,215 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=145, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=108, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,216 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,216 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 469 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,220 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,221 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:43,221 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:43,221 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,221 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
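The repeated "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." pairs above are the source task's queue poll loop idling: the connector only hands a batch to WorkerSourceTask when the batch fills up or the poll interval expires, and with no new Oracle changes it returns an empty batch ("About to send 0 records to Kafka"). The sketch below is a minimal, illustrative bounded-queue poller of that general shape; the class and field names are assumptions for illustration (in Debezium the knobs correspond to the max.batch.size, max.queue.size and poll.interval.ms connector properties), not the actual ChangeEventQueue implementation.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

/** Illustrative bounded-queue poller: returns a batch when it fills up or the poll interval elapses. */
public class BatchPoller<T> {
    private final BlockingQueue<T> queue;   // cf. Debezium's max.queue.size
    private final int maxBatchSize;         // cf. Debezium's max.batch.size
    private final long pollIntervalMs;      // cf. Debezium's poll.interval.ms

    public BatchPoller(int queueCapacity, int maxBatchSize, long pollIntervalMs) {
        this.queue = new LinkedBlockingQueue<>(queueCapacity);
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    /** Producer side: the streaming thread offers change events here. */
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    /** Consumer side: called from the source task's poll(); may return an empty batch, as in the log. */
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.currentTimeMillis() + pollIntervalMs;
        while (batch.size() < maxBatchSize) {
            long remaining = deadline - System.currentTimeMillis();
            if (remaining <= 0) {
                break; // "no records available or batch size not reached yet" -> stop waiting for now
            }
            T event = queue.poll(remaining, TimeUnit.MILLISECONDS); // "sleeping a bit..."
            if (event == null) {
                break; // timed out with nothing queued
            }
            batch.add(event);
        }
        return batch; // empty list -> "About to send 0 records to Kafka"
    }
}
```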
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,223 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1972, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,223 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,223 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,223 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,223 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,223 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,223 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,223 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,223 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=653) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,223 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,223 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,223 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406153 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,223 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1975, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=653, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,224 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,224 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,224 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,224 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,224 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,224 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406153 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1964, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=652) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371976 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,276 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1967, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=652, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,276 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371976 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,311 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1973, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,311 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,311 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,311 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,311 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,311 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,311 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,311 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,311 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=655) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,311 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,311 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=663, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,311 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406065 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,312 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,312 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1976, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=655, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,312 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous 
request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406064 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,312 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=657) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,312 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147405041 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,312 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=664, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=657, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,312 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,312 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147405041 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1974, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
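The naument--offsets, naument--statuses and naument--configs clients seen here are the Connect worker's internal consumers reading its storage topics. Each FETCH carries an incremental fetch session (sessionId/epoch), so the request body can stay empty (topics=[]) and the broker answers for the implied partitions; responses=[] simply means no new data arrived within maxWaitMs. The request fields logged above (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, i.e. READ_UNCOMMITTED) map to standard consumer settings. As a hedged illustration only, a plain Java consumer with the same knobs would look like the sketch below; the topic, group and bootstrap values are taken from this log, and the rest is an assumption, not how the Connect worker actually builds these clients.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class StatusTopicReader {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument"); // groupId seen on the internal consumers
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // The values below match the fields visible in the FETCH requests in this log.
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);        // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);            // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);     // maxBytes=52428800 (50 MiB)
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status"));
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            System.out.printf("fetched %d records%n", records.count());
        }
    }
}
```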
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=656) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406041 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1977, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=656, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147406041 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1965, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,367 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,367 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=652) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371885 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,368 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1968, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=652, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
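The partitions being cycled through here line up with the worker's storage topics: naument-offset-0..24 (25 partitions), naument-status-0..4 (5 partitions) and the single naument-config-0. A minimal sketch of the corresponding distributed-worker storage settings is below; the property keys are the standard Kafka Connect worker ones and the naument-* names come from this log, but the snippet is an assumed illustration (in practice these live in the worker's properties file), not the actual deployment configuration.

```java
import java.util.Properties;

/** Illustrative Connect distributed-worker storage settings matching the topics seen in this log. */
public class WorkerStorageConfig {
    public static Properties build() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
        props.put("group.id", "naument");                    // groupId used by the internal consumers
        props.put("offset.storage.topic", "naument-offset"); // partitions 0..24 in the log
        props.put("offset.storage.partitions", "25");
        props.put("status.storage.topic", "naument-status"); // partitions 0..4 in the log
        props.put("status.storage.partitions", "5");
        props.put("config.storage.topic", "naument-config"); // single partition, naument-config-0
        return props;
    }
}
```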
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,368 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371884 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1966, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=652) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371878 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1969, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=652, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371878 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,685 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:43,685 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:43,685 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=146, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,685 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,685 
TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=146, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:43,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=145, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=109) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,716 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2969 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=147, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=109, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,716 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,716 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2969 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,721 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,722 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,722 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:43,722 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:43,722 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,722 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:43,724 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1975, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,724 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,724 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=654) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,724 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405652 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,724 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1978, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=654, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,724 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,724 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405652 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1967, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,776 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=653) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,777 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371476 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,777 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1970, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=653, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,777 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371475 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1976, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=656) for node 1. 
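Every FETCH request logged above carries maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), which are the stock consumer defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. Below is a minimal sketch of a standalone consumer that would issue equivalent fetches against the status topic; the broker list and topic name are taken from this log, while the group id naument-log-inspector and the class name are invented for illustration so the example does not join the worker's own consumer group.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class StatusTopicTailSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-log-inspector"); // hypothetical group, not the worker's
        // Defaults that show up verbatim in the logged FetchRequestData:
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");            // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");         // maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status"));
            while (true) {
                // Each poll() drives FETCH requests like the ones logged by NetworkClient above.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> r : records) {
                    System.out.printf("%s-%d @ offset %d%n", r.topic(), r.partition(), r.offset());
                }
            }
        }
    }
}

Each call to poll() is what produces the "Polling for fetches with timeout ..." lines and the NetworkClient send/receive entries seen throughout this section.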
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405564 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1979, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=656, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405564 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=664, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=658) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,812 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147404541 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,812 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=665, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=658, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,813 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,813 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147404540 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1977, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
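Three internal consumers alternate throughout this section: naument--offsets over naument-offset-0..24, naument--statuses over naument-status-0..4, and naument--configs over the single-partition naument-config-0. That layout is consistent with a Kafka Connect distributed worker whose group.id is naument and whose offset, status and config storage topics use the usual partition counts (25, 5 and 1). The sketch below restates the presumed worker settings as a Java map purely for illustration; the real worker is configured through its properties file, and these exact topic names and partition counts are inferred from the log rather than taken from the worker configuration itself.

import java.util.HashMap;
import java.util.Map;

public class WorkerPropsSketch {
    // Assumed Connect distributed worker settings, inferred from the consumer and topic names in this log.
    static Map<String, String> workerProps() {
        Map<String, String> p = new HashMap<>();
        p.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
        p.put("group.id", "naument");                     // matches groupId=naument above
        p.put("offset.storage.topic", "naument-offset");  // naument-offset-0..24 => 25 partitions
        p.put("offset.storage.partitions", "25");
        p.put("status.storage.topic", "naument-status");  // naument-status-0..4  => 5 partitions
        p.put("status.storage.partitions", "5");
        p.put("config.storage.topic", "naument-config");  // naument-config-0     => single partition
        return p;
    }
}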
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,835 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,835 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,835 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,835 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=657) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,835 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405541 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,836 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1980, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=657, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405540 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1968, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=653) for node 1. 
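The FetchPosition entries above record where the offsets consumer currently stands on each partition it owns from broker1 (for example naument-offset-19 at offset 25 and naument-offset-21 at offset 2). A comparable view without TRACE logging is to ask the brokers for the group's committed offsets; below is a minimal AdminClient sketch, assuming the same broker list is reachable. Two caveats: committed offsets can trail the in-memory fetch positions logged here, and Connect's internal topic readers may not commit to the group at all, in which case the result can be empty.

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class GroupOffsetsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        try (Admin admin = Admin.create(props)) {
            // Committed offsets for the worker group seen in the log, keyed by topic-partition.
            Map<TopicPartition, OffsetAndMetadata> offsets =
                    admin.listConsumerGroupOffsets("naument").partitionsToOffsetAndMetadata().get();
            offsets.forEach((tp, om) ->
                    System.out.printf("%s-%d committed at %d%n", tp.topic(), tp.partition(), om.offset()));
        }
    }
}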
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371384 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,868 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1971, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=653, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,868 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371384 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1969, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=653) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371378 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:43,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1972, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=653, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:43,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147371377 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,215 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=147, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,216 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,216 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,216 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
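(Editor's note, for orientation: clientId connector-consumer-vk_nau27_sink-0 above is the ordinary KafkaConsumer that the vk_nau27_sink task uses to read the Debezium topic, and the FETCH requests it sends throughout this log carry maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0, i.e. read_uncommitted. The sketch below is only an illustration of a standalone consumer configured the same way; the bootstrap servers, group id, client id and topic name are taken from the log, while the deserializers and the print loop are assumptions.)

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class Nau27SinkFetchSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker addresses and client/group ids as they appear in the log; the rest is illustrative.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "connect-vk_nau27_sink");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "connector-consumer-vk_nau27_sink-0");
        // Matches FetchRequestData(maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0)
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
            while (true) {
                // Each poll() drives the AbstractFetch/FetchSessionHandler bookkeeping seen above:
                // incremental FETCH requests per broker, skipping nodes with an in-flight request.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    System.out.printf("partition=%d offset=%d%n", record.partition(), record.offset());
                }
            }
        }
    }
}
```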
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,216 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=110) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,216 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,216 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,216 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2469 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,216 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=148, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=110, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,216 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,216 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2469 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,222 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,222 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,222 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,222 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,222 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,222 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,222 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue]
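(Editor's note: the repeating "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." pair comes from Debezium's ChangeEventQueue, the buffer between the Oracle streaming thread and the Connect polling thread; identical entries with the same 13:56:44,222 timestamp have been trimmed here. The cadence of this loop is roughly governed by the connector options max.batch.size, max.queue.size and poll.interval.ms. The class below is not Debezium's implementation, only a minimal sketch of the same pattern: a bounded queue drained in batches, which waits briefly when neither the batch size nor the deadline has produced anything.)

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Simplified stand-in for io.debezium.connector.base.ChangeEventQueue;
// the real class additionally handles back pressure, error propagation, metrics, etc.
public class SimpleChangeEventQueue<T> {
    private final BlockingQueue<T> queue;
    private final int maxBatchSize;
    private final long pollIntervalMs;

    public SimpleChangeEventQueue(int maxQueueSize, int maxBatchSize, long pollIntervalMs) {
        this.queue = new ArrayBlockingQueue<>(maxQueueSize);
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    // Called by the streaming side (the database reader thread) to hand over change events.
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Called from SourceTask.poll(); loosely mirrors the "checking for more records" /
    // "no records available or batch size not reached yet, sleeping a bit" log pair.
    public List<T> pollBatch() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.currentTimeMillis() + pollIntervalMs;
        while (batch.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
            // "checking for more records..."
            T event = queue.poll(50, TimeUnit.MILLISECONDS);
            if (event != null) {
                batch.add(event);
            }
            // else: nothing available yet and the batch is not full -> keep waiting until the deadline.
        }
        return batch;
    }
}
```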
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,222 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,222 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,222 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,223 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:44,223 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:44,223 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,223 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,225 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1978, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,225 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,225 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch 
for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=655) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,225 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405151 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,225 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1981, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=655, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,225 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,225 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405151 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, 
correlationId=1970, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=654) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370975 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1973, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=654, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
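(Editor's note: every "Skipping fetch for partition ... because previous request to ... has not been processed" line is routine TRACE bookkeeping: the consumer keeps at most one in-flight fetch per broker, so partitions led by a node with a pending request are skipped until its response arrives. If this verbosity is unwanted, the level of the Kafka client loggers can be lowered at runtime through Connect's admin REST endpoint (PUT /admin/loggers/{logger}) instead of editing log4j and restarting. A sketch, assuming the worker's REST listener is reachable at http://localhost:8083 — adjust to the actual worker address:)

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TameClientTraceLogging {
    public static void main(String[] args) throws Exception {
        // Hypothetical worker address; replace with the real Connect REST listener.
        String worker = "http://localhost:8083";
        HttpClient http = HttpClient.newHttpClient();

        // Raise org.apache.kafka.clients from TRACE to INFO without restarting the worker.
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(worker + "/admin/loggers/org.apache.kafka.clients"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                .build();

        HttpResponse<String> response = http.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```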
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,277 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370975 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,312 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1979, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,312 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,312 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=657) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,312 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405064 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,312 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1982, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=657, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,312 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405064 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,313 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=665, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,313 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,313 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,313 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,313 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=659) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,313 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,313 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,313 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147404040 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,313 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=666, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=659, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,313 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,313 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147404040 
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1980, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=658) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405040 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1983, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=658, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147405040 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1971, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=654) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,369 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370883 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,369 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1974, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=654, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,370 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370882 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1972, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=654) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,375 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370877 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1975, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=654, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370876 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=148, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=111) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,716 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1969 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,716 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=149, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=111, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,717 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,717 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1968 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,723 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,724 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:44,724 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:44,724 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,724 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:44,725 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1981, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,725 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,725 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,725 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,725 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,725 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,725 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,725 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,726 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=656) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,726 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,726 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,726 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404651 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,726 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1984, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=656, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,726 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,726 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,726 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,726 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,726 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,726 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404650 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1973, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=655) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370474 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1976, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=655, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370474 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,812 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1982, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=658) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404564 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1985, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=658, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,813 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,813 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404563 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,814 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=666, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,814 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,814 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,814 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,814 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=660) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,814 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,814 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,814 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147403539 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,814 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=667, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=660, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,814 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,814 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147403539 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1983, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=659) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404539 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1986, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=659, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404539 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1974, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=655) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370382 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,870 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1977, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=655, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,870 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370382 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1975, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=655) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370376 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:44,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1978, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=655, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:44,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147370376 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,094 TRACE || com.mchange.v2.async.ThreadPoolAsynchronousRunner$DeadlockDetector@26cb9b7a -- Running DeadlockDetector[Exiting. No pending tasks.] 
[com.mchange.v2.async.ThreadPoolAsynchronousRunner] 2023-08-25 13:56:45,217 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=149, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,217 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,217 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,217 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,217 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=112) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,217 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,217 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,217 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1468 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,217 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=150, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=112, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,217 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) 
has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,217 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1468 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,224 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,225 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,225 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:45,225 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:45,225 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,225 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
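The ChangeEventQueue messages above are routine DEBUG output from the Debezium source task's poll loop while no change events are pending; they indicate an idle connector, not a fault. If this chatter needs to be silenced without restarting the worker, Kafka Connect's admin REST endpoint (PUT /admin/loggers/{logger}, KIP-495) can raise the logger level at runtime. A minimal sketch in Java, assuming the worker's REST listener is reachable at localhost:8083 (an assumed address, not taken from this log):

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class QuietChangeEventQueue {
    public static void main(String[] args) throws Exception {
        // Kafka Connect exposes PUT /admin/loggers/{logger} for runtime log-level
        // changes; the worker URL below is an assumed placeholder.
        String workerUrl = "http://localhost:8083";
        String logger = "io.debezium.connector.base.ChangeEventQueue";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(workerUrl + "/admin/loggers/" + logger))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString("{\"level\": \"INFO\"}"))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        // A 200 response lists the logger names whose level was changed.
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```

The same effect can be made permanent by setting the level for io.debezium.connector.base.ChangeEventQueue in the worker's log4j configuration.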
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,226 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1984, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,226 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,226 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,226 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,226 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,226 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,226 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,226 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,226 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=657) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,226 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,226 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,226 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404150 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,226 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1987, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=657, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,227 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,227 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,227 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,227 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,227 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,227 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404150 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,277 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1976, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=656) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369975 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,278 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1979, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=656, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,278 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369974 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,313 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1985, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,313 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,313 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=659) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,313 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404063 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,313 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1988, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=659, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,313 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,313 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404063 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,314 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=667, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,314 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,314 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,314 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,315 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=661) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,315 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,315 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,315 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147403039 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,315 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=668, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=661, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,315 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,315 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147403038 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1986, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=660) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404038 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1989, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=660, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147404038 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1977, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=656) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,371 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369881 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,371 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1980, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=656, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,372 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369881 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1978, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=656) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369875 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1981, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=656, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369875 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,494 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:45,495 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:45,495 TRACE Oracle|vk_nau27|streaming Current time 1692961005495 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:45,495 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:45,497 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 2800. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:45,497 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599011 as end SCN. 
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:45,497 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:45,498 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599012' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:45,502 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:45,504 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931598942, endScn=289931599011, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:45,504 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931598943', endScn => '289931599011', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:45,513 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931598942, 289931599011] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598951, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:42Z, transactionId='0b00130047102f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba6f.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 0b00130047102f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598954, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:42Z, transactionId='02002000a6cf2700', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba75.0144, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 02002000a6cf2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SYSTEM_CHECK' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598958, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:43Z, transactionId='10000c00de2d2a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba78.00ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 10000c00de2d2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598963, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='1300000042b42b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002ba8f.0178, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 1300000042b42b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598965) LogMinerEventRow{scn=289931598966, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='07000c0038c62d00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002baa1.01e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 07000c0038c62d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598969, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='06000000f0062a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002baa3.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 06000000f0062a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598972, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='0a001a009f223100', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bab9.0060, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 0a001a009f223100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598974) LogMinerEventRow{scn=289931598975, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='0c0019000d532b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bac9.0060, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 0c0019000d532b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598977, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='1100010047302b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bac9.0118, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 1100010047302b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SEQUENCE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598980, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='02000f00a6cc2700', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bacb.00d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 02000f00a6cc2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598984, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='05001000036e2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bad2.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 05001000036e2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SEQUENCE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598987, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='12000b002f4f2a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bad4.00d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 12000b002f4f2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931598989) LogMinerEventRow{scn=289931598990, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='1400210040ae2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002badc.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 1400210040ae2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598993, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='0d000b004cd72900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002baeb.00f0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 0d000b004cd72900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598996, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='01001100d1b42500', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bafb.0078, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 01001100d1b42500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931598999, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='0f001a002ceb2900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb0a.0078, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,627 TRACE Oracle|vk_nau27|streaming Transaction 0f001a002ceb2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599002, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='0a0002003d1a3100', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb19.0058, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Transaction 0a0002003d1a3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599005, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='03001500a7b42800', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb29.014c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Transaction 03001500a7b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599008, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='0e000d005b422900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb39.006c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming Transaction 0e000d005b422900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 DEBUG Oracle|vk_nau27|streaming Counters{rows=74, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=19, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 DEBUG Oracle|vk_nau27|streaming Processed in 1 ms. Lag: 628. 
Offset SCN: 289931598942, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599008, txIds=[0e000d005b422900]}}], Active Transactions: 0, Sleep: 2800 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:45,628 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:45,635 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:45,637 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:45,639 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:45,641 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:45,712 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:45,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:45,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=125, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=125, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:45,718 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=150, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,718 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,718 DEBUG 
|| [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,718 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,718 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=113) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,718 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,718 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,718 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 967 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,718 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=151, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=113, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,718 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,718 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 967 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,725 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,725 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,725 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
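Alongside the batch counters, the streaming source samples its own Oracle session memory with the v$statname/v$mystat queries quoted above and then logs the "Oracle Session UGA ... PGA ..." summary. A minimal JDBC sketch of the same probe follows; it assumes an already-open java.sql.Connection named conn to the source database, and the class and method names are made up for this illustration.

    // Hypothetical helper that runs the session-memory query quoted in the log above.
    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    public final class SessionMemoryProbe {
        private static final String STAT_QUERY =
            "SELECT VALUE FROM v$statname n, v$mystat m "
          + "WHERE n.name = ? AND n.statistic# = m.statistic#";

        /** Returns the current value of one v$mystat statistic, e.g. "session pga memory". */
        static long readStat(Connection conn, String statName) throws SQLException {
            try (PreparedStatement ps = conn.prepareStatement(STAT_QUERY)) {
                ps.setString(1, statName);
                try (ResultSet rs = ps.executeQuery()) {
                    return rs.next() ? rs.getLong(1) : -1L;
                }
            }
        }
    }

Reading "session uga memory", "session uga memory max", "session pga memory" and "session pga memory max" in turn yields the raw byte counts that the connector formats into the UGA/PGA line above.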
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,725 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,726 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,726 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:45,726 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:45,726 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,726 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
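The repeated "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." pairs are the connector task polling Debezium's in-memory change event queue while LogMiner has nothing new to hand over. A simplified, self-contained sketch of that batch-or-timeout poll pattern is shown below; it is not the real io.debezium.connector.base.ChangeEventQueue, and the capacity, batch size and poll interval are assumed values.

    // Simplified batch-or-timeout queue in the spirit of the ChangeEventQueue messages
    // logged above. Capacity, batch size and poll interval here are assumptions.
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    public final class SimpleEventQueue<T> {
        private final BlockingQueue<T> queue = new LinkedBlockingQueue<>(8192);
        private final int maxBatchSize = 2048;
        private final long pollIntervalMs = 500;

        public void enqueue(T event) throws InterruptedException {
            queue.put(event); // blocks when the queue is full (back-pressure on the source)
        }

        /** Returns up to maxBatchSize events, waiting briefly when none are available. */
        public List<T> poll() throws InterruptedException {
            List<T> batch = new ArrayList<>(maxBatchSize);
            // "checking for more records..." -> wait a little for the first event
            T first = queue.poll(pollIntervalMs, TimeUnit.MILLISECONDS);
            if (first == null) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                return batch;
            }
            batch.add(first);
            queue.drainTo(batch, maxBatchSize - 1);
            return batch;
        }
    }

While the source is idle, poll() keeps returning empty batches after a short wait and the caller simply tries again, which is the DEBUG pattern above; the WorkerSourceTask then reports "About to send 0 records to Kafka" and polls the source once more.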
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:45,727 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1987, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,727 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,727 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=658) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,727 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403649 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,727 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1990, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=658, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,727 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,727 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403649 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1979, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,778 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=657) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,779 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369474 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,779 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1982, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=657, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,779 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369473 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1988, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
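The surrounding FETCH traffic comes from the Connect worker's internal consumers (naument--offsets, naument--statuses, naument--configs) driving incremental fetch sessions against brokers 1-3: each broker has at most one in-flight request, and partitions whose previous request has not yet been answered are skipped until the response arrives. For comparison, a standalone consumer that tails the status topic in the same READ_UNCOMMITTED mode could look roughly like the sketch below; the broker addresses and the naument-status topic name are taken from the log, while the group id and deserializers are assumptions (a separate group id is used so the sketch does not join the worker's own "naument" group).

    // Illustrative stand-alone consumer; not part of the Connect worker shown in this log.
    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public final class StatusTopicTailer {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-log-inspection"); // assumed, deliberately not "naument"
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // matches the READ_UNCOMMITTED fetches above
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-status"));
                while (true) {
                    // Each poll() drives incremental FETCH sessions like the ones seen in the log.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
                }
            }
        }
    }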
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,813 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=660) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,814 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403563 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,814 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1991, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=660, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,814 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,814 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403562 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,815 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=668, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,815 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,815 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,815 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,815 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=662) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,815 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,815 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,815 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147402538 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,815 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=669, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=662, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,815 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,815 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147402538 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,838 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1989, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,838 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,838 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
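
The FetchRequestData printed in the DEBUG lines above (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0) is driven by ordinary consumer configuration, and the values shown are the client defaults. As a point of reference, a minimal stand-alone consumer configured with the same settings would issue the same kind of fetch; this is only an illustrative sketch (class name, group id and subscribed topic are made up here), not the worker's internal naument--* consumers:

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchTuningSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "fetch-tuning-sketch");          // illustrative group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // maxWaitMs=500, minBytes=1 and maxBytes=52428800 in FetchRequestData
        // correspond to these three settings (the same values as the defaults):
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
        // isolationLevel=0 in the request is READ_UNCOMMITTED, matching the log lines above.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status"));                          // illustrative topic
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
            records.forEach(r -> System.out.printf("%s-%d @ offset %d%n", r.topic(), r.partition(), r.offset()));
        }
    }
}

With fetch.min.bytes=1 and fetch.max.wait.ms=500, an idle broker answers each fetch after at most 500 ms with an empty response, which is exactly the stream of FetchResponseData(..., responses=[]) seen throughout this section.
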
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=661) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,838 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403538 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,838 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1992, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=661, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,838 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,838 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,839 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403538 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1980, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=657) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369380 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,872 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1983, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=657, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
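
Most of the volume in this part of the log is TRACE output from org.apache.kafka.clients.consumer.internals.AbstractFetch and org.apache.kafka.clients.consumer.KafkaConsumer (the repeated "Skipping fetch..." and "Polling for fetches..." lines). If those levels were raised only for troubleshooting, they can be lowered again at runtime through the Connect REST API (PUT /admin/loggers/<logger>) without restarting the worker. A sketch using the JDK HTTP client; the worker address http://localhost:8083 is an assumption:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class QuietFetchLoggers {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String worker = "http://localhost:8083";   // assumed Connect REST listener
        String logger = "org.apache.kafka.clients.consumer.internals.AbstractFetch";

        // PUT /admin/loggers/<name> changes the level on this worker only, until its next restart.
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(worker + "/admin/loggers/" + logger))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                .build();

        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}

The static equivalent is a line such as log4j.logger.org.apache.kafka.clients.consumer.internals=INFO in the worker's log4j configuration, which survives restarts.
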
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,872 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369380 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1981, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,877 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=657) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:45,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369375 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:45,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1984, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=657, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:45,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147369374 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,218 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=151, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,219 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,219 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,219 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
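
The sink task's consumer (clientId connector-consumer-vk_nau27_sink-0, group connect-vk_nau27_sink) is fetching vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 from position 2001 and keeps receiving empty responses, which suggests it has simply caught up with the topic. One way to confirm that from outside the worker is to compare the group's committed offsets with the partition end offsets via the AdminClient; a sketch, with the bootstrap servers taken from the broker addresses in the log:

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class SinkLagCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");

        try (Admin admin = Admin.create(props)) {
            String group = "connect-vk_nau27_sink";
            // Offsets committed by the sink task's consumer group.
            Map<TopicPartition, OffsetAndMetadata> committed =
                    admin.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get();

            // End offsets of the same partitions, to compute the lag.
            Map<TopicPartition, OffsetSpec> latestSpec = new HashMap<>();
            committed.keySet().forEach(tp -> latestSpec.put(tp, OffsetSpec.latest()));
            Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> end =
                    admin.listOffsets(latestSpec).all().get();

            committed.forEach((tp, om) -> System.out.printf("%s committed=%d end=%d lag=%d%n",
                    tp, om.offset(), end.get(tp).offset(), end.get(tp).offset() - om.offset()));
        }
    }
}

If the reported lag stays at or near zero while the connector status is RUNNING, the empty fetches above are normal idling rather than a stalled task.
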
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,219 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=114) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,219 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,219 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,219 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 466 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,219 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=152, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=114, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,219 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,219 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 466 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[... the previous two ChangeEventQueue messages repeat back-to-back several dozen more times, all timestamped 13:56:46,226 ...]
2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
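The repeated pair above is the connector task's idle loop: the change event queue is checked for a batch, and when no records are available (or the batch is not yet full) the task sleeps briefly and checks again. A minimal sketch of that behaviour, assuming invented names (IdlePollLoopSketch, maxBatchSize, pollIntervalMs) rather than Debezium's actual ChangeEventQueue internals:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Illustrative sketch only: names and structure are assumptions,
// not Debezium's actual ChangeEventQueue implementation.
public class IdlePollLoopSketch {
    private final BlockingQueue<String> queue = new LinkedBlockingQueue<>();
    private final int maxBatchSize = 2048;    // assumed batch-size limit
    private final long pollIntervalMs = 500;  // assumed poll timeout

    public List<String> poll() throws InterruptedException {
        List<String> batch = new ArrayList<>();
        long deadline = System.currentTimeMillis() + pollIntervalMs;
        while (batch.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
            // "checking for more records..."
            int drained = queue.drainTo(batch, maxBatchSize - batch.size());
            if (drained == 0) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                TimeUnit.MILLISECONDS.sleep(50);
            }
        }
        return batch; // may be empty, in which case the caller simply polls again
    }
}

Returning an empty batch here matches the worker-side entries just below ("About to send 0 records to Kafka", "Nothing to send to Kafka. Polling source for additional records"): the source task keeps polling until the queue hands it something.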
2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,226 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,227 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,227 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:46,227 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:46,227 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,227 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
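The consumer entries that follow come from the Connect worker's internal consumers (naument--statuses, naument--offsets, naument--configs) and show Kafka's incremental fetch protocol at work: each broker keeps a fetch session identified by a sessionId and epoch, an empty FetchResponseData(responses=[]) means nothing changed for the implied partitions, and partitions whose broker already has an in-flight FETCH are skipped until that request has been processed. A rough sketch of the per-broker bookkeeping this implies, with invented names (FetchSessionSketch, pendingNodes) standing in for the Kafka client's FetchSessionHandler and AbstractFetch:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Rough sketch of the per-broker bookkeeping implied by the fetch logs below.
// Names are invented for illustration; the real logic lives in the Kafka
// client's FetchSessionHandler and AbstractFetch.
public class FetchSessionSketch {

    private final Map<Integer, Integer> epochByNode = new HashMap<>(); // fetch session epoch per broker
    private final Set<Integer> pendingNodes = new HashSet<>();         // brokers with an in-flight FETCH

    /** Returns true if an incremental fetch may be sent to this broker now. */
    public boolean maybeSendFetch(int nodeId) {
        if (pendingNodes.contains(nodeId)) {
            // "Skipping fetch for partition ... because previous request ... has not been processed"
            return false;
        }
        epochByNode.merge(nodeId, 1, Integer::sum);
        // "Built incremental fetch (sessionId=..., epoch=...) for node ...": with nothing
        // added/altered/removed, the request carries only the session id and epoch and
        // the broker answers for the implied partitions.
        pendingNodes.add(nodeId); // "Adding pending request for node ..."
        return true;
    }

    /** An empty FetchResponseData(responses=[]) simply clears the pending flag for the broker. */
    public void handleResponse(int nodeId) {
        pendingNodes.remove(nodeId); // "Removing pending request for node ..."
    }
}

Keeping only a session epoch and a pending flag per broker is what lets the client send near-empty requests (toSend=(), toForget=(), toReplace=()) while the broker keeps serving the full implied partition set.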
2023-08-25 13:56:46,228 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1990, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:46,228 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:46,228 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,228 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,228 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,228 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,228 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,228 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,228 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=659) for node 3.
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,228 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,228 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,228 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403148 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,228 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1993, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=659, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,228 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,228 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,228 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,228 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,228 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,228 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403148 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1982, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=658) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368973 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,279 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1985, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=658, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,279 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368973 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,314 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1991, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,314 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,314 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=661) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,314 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403062 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,314 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1994, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=661, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,314 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,314 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403062 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,316 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=669, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,316 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,316 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,316 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,316 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=663) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,316 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,316 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,316 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147402037 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,316 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=670, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=663, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,316 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,316 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147402037 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,339 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1992, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,339 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,339 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=662) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,339 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403037 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,339 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1995, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=662, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,339 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147403037 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1983, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=658) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368879 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,373 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1986, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=658, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,373 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368879 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1984, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=658) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368874 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,378 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1987, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=658, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368874 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,685 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:46,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:46,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=153, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,686 
TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=153, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:46,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,719 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=152, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,719 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,719 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,719 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,719 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=115) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,719 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,719 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,719 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2966 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,719 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=154, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=115, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,720 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,720 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2965 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,727 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,728 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,728 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:46,728 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:46,728 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
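
The DEBUG entries above are the Debezium source task's idle loop: the streaming thread has buffered no new change events, so each poll drains an empty batch, waits briefly, and the Connect worker reports "About to send 0 records to Kafka" before polling the source again. A minimal sketch of that batch-draining pattern, assuming a plain bounded queue (the class and parameter names below are hypothetical, not Debezium's actual ChangeEventQueue API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Illustrative only: a simplified batch-draining loop in the spirit of the
// "checking for more records... / sleeping a bit..." DEBUG messages above.
public class SimpleChangeQueue<T> {
    private final LinkedBlockingQueue<T> queue = new LinkedBlockingQueue<>(8192);
    private final int maxBatchSize;
    private final long pollIntervalMs;

    public SimpleChangeQueue(int maxBatchSize, long pollIntervalMs) {
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    // Producer side: the streaming thread enqueues captured change events.
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Consumer side: the task's poll() drains up to maxBatchSize buffered
    // events; when nothing is buffered it sleeps briefly and returns an empty
    // batch, which is what produces "About to send 0 records to Kafka".
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        queue.drainTo(batch, maxBatchSize);
        if (batch.isEmpty()) {
            // "no records available or batch size not reached yet, sleeping a bit..."
            TimeUnit.MILLISECONDS.sleep(pollIntervalMs);
        }
        return batch;
    }
}

Debezium's real queue is additionally bounded by the connector's max.queue.size / max.batch.size settings and waits poll.interval.ms between empty polls, which is what the "batch size not reached yet" wording refers to.
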
2023-08-25 13:56:46,728 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:46,728 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1993, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:46,728 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:46,728 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,728 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,728 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,728 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,729 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,729 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,729 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=660) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler]
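
Each FETCH request in this section carries maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), which match the Java client defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level; the repeated "Skipping fetch ... has not been processed" TRACE lines only mean the consumer keeps at most one in-flight fetch per broker while reusing its incremental fetch session. A minimal stand-alone consumer sketch with those same settings spelled out explicitly (illustrative only: the group id is a placeholder, while the brokers and the naument-status topic are taken from this log):

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Illustrative only: a bare consumer configured with the fetch settings that
// appear in the FetchRequestData entries above (all of them client defaults).
public class StatusTopicTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "log-inspection");          // hypothetical group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");            // maxWaitMs=500 in the log
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");         // maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status"));
            while (true) {
                // The client keeps at most one in-flight fetch per broker; partitions whose
                // leader still has an outstanding request are skipped, as the TRACE lines report.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
            }
        }
    }
}
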
2023-08-25 13:56:46,729 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,729 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,729 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402648 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:46,729 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1996, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=660, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:46,729 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,729 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,729 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,729 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,729 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:46,729 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402647 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1985, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=659) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368472 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,780 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1988, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=659, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,780 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368472 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1994, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=662) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,815 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402561 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,815 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1997, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=662, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402560 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,816 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=670, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,816 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,816 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,816 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,816 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=664) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,816 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,816 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,816 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147401537 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,817 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=671, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=664, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,817 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,817 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147401536 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1995, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,839 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,839 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,839 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,840 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,840 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=663) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,840 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,840 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402537 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,840 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1998, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=663, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402536 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1986, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,873 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,873 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=659) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368379 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,874 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1989, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=659, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,874 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368378 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1987, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=659) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368373 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1990, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=659, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:46,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147368373 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:46,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:56:46,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:56:47,220 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=154, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,220 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,220 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,220 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at 
position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,220 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=116) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,220 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,220 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,220 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2465 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,220 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=155, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=116, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,220 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,220 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2465 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,228 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,228 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,228 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,228 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,228 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,228 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue]
[... the ChangeEventQueue DEBUG pair "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." repeats verbatim many more times at 2023-08-25 13:56:47,228; duplicate entries elided ...]
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,228 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,228 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,228 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,229 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,229 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:47,229 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:47,229 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,229 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,229 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1996, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,229 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,229 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node 
broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=661) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,229 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402147 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,229 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1999, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=661, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,229 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,229 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402147 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,280 
DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1988, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,280 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,280 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping 
fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position 
FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,280 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=660) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,281 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367972 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,281 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1991, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=660, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,281 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367971 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1997, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=663) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402060 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2000, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=663, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,316 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402060 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,317 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=671, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,317 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,317 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,317 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,317 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=665) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,317 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,317 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,317 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147401036 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,317 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=672, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=665, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,317 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,317 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147401036 
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1998, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=664) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402036 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2001, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=664, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147402036 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1989, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=660) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367878 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,374 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1992, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=660, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,374 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367878 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1990, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=660) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,380 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367873 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,380 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1993, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=660, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367872 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,634 TRACE || [Producer clientId=naument--offsets] No load stats for topic naument-offset, not using adaptive [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:56:47,721 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=155, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,721 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,721 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,721 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,721 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=117) for 
node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,721 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,721 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,721 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1964 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,721 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=156, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=117, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,721 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,721 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1964 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,729 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,729 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,729 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,729 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,729 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,729 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,729 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,729 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,729 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,730 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:47,730 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:47,730 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,730 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:47,730 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=1999, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,730 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,730 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=662) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,730 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401646 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,730 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2002, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=662, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,730 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,730 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401646 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1991, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=661) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367471 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,781 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1994, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=661, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,781 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367471 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2000, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=664) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401560 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,816 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2003, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=664, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,816 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401560 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,818 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=672, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,818 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,818 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,818 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,818 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=666) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,818 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,818 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,818 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147400535 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,818 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=673, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=666, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,818 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,818 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147400535 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,827 TRACE || [Producer clientId=naument--statuses] All queue lengths are the same, not using adaptive for topic naument-status [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:56:47,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2001, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) 
[org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=665) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401535 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2004, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=665, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401535 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1992, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=661) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,875 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367377 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,875 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1995, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=661, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367376 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1993, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=661) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367372 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1996, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=661, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:47,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147367372 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:47,936 TRACE || [Producer clientId=naument--configs] The number of partitions is too small: available=1, all=1, not using adaptive for topic naument-config [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:56:48,221 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=156, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,221 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,221 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,221 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,221 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental 
fetch (sessionId=1637453888, epoch=118) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,221 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,221 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,221 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1464 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,221 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=157, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=118, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,222 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,222 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1463 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,230 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,230 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,230 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,230 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,230 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,230 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,230 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,230 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,230 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2002, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,230 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,230 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:48,231 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null)
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:48,231 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=663) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,231 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,231 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,231 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401146 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,231 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2005, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=663, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,231 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
2023-08-25 13:56:48,231 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,231 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401145 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1994, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=662) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366970 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,282 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1997, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=662, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,282 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366970 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2003, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,316 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=665) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,317 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401060 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,317 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2006, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=665, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,317 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,317 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401059 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,318 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=673, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,318 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,318 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,318 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,318 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=667) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,318 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,318 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,318 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147400035 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,318 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=674, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=667, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,318 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,318 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147400035 
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2004, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=666) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401035 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2007, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=666, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147401034 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,375 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1995, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=662) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366876 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,376 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1998, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=662, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,376 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366876 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,380 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1996, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
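The naument--offsets consumer reads all 25 partitions of the offset storage topic, and its fetches are grouped by partition leader: the partitions led by broker1 share one incremental fetch session, broker2 and broker3 each get their own, and every partition whose leader already has a request in flight is logged as "Skipping fetch ... has not been processed" until that response comes back. The leader assignment behind that grouping can be checked with the Admin client; a small sketch follows (Kafka clients 3.1+ assumed for allTopicNames(), broker address taken from the log, class name hypothetical).

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.TopicDescription;

public class OffsetTopicLeaders {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:29092"); // any broker from the log will do
        try (Admin admin = Admin.create(props)) {
            TopicDescription description = admin.describeTopics(List.of("naument-offset"))
                    .allTopicNames().get()
                    .get("naument-offset");
            // The consumer keeps at most one in-flight FETCH per leader node, which is why
            // every partition led by a broker with an outstanding request shows up as
            // "Skipping fetch for partition ... previous request ... has not been processed".
            description.partitions().forEach(p ->
                    System.out.printf("naument-offset-%d -> leader %s%n", p.partition(), p.leader()));
        }
    }
}
```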
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,380 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,380 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,380 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=662) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366872 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1999, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=662, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366871 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,441 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:48,442 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:48,442 TRACE Oracle|vk_nau27|streaming Current time 1692961008442 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:48,442 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:48,444 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 3000. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:48,444 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599022 as end SCN. 
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:48,444 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:48,445 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599023' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:48,449 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:48,451 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599008, endScn=289931599022, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:48,451 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599009', endScn => '289931599022', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:48,461 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599008, 289931599022] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:48,542 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:48,542 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599013, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='08000f0000592e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb3b.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:48,542 TRACE Oracle|vk_nau27|streaming Transaction 08000f0000592e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:48,542 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599016, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:45Z, transactionId='0b0014005a0e2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb42.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:48,542 TRACE Oracle|vk_nau27|streaming Transaction 0b0014005a0e2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:48,542 DEBUG Oracle|vk_nau27|streaming Counters{rows=5, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=2, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:48,542 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 3542. 
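The vk_nau27 streaming entries above are one complete LogMiner pass: read the current SCN as the end SCN, bump the LOG_MINING_FLUSH table so the redo buffer is written out, start a mining session over the SCN window with DICT_FROM_REDO_LOGS + DDL_DICT_TRACKING + NO_ROWID_IN_STMT, read the window (here only two empty commits from transactions 08000f0000592e00 and 0b0014005a0e2f00), and record the counters. The same statements can be replayed by hand over JDBC to verify privileges or measure latency; the sketch below reuses the SQL strings from the log, while the connection URL, credentials, the V$LOGMNR_CONTENTS projection and the class name are placeholders rather than the connector's actual code path.

```java
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class LogMinerPassByHand {
    public static void main(String[] args) throws Exception {
        // URL and credentials are placeholders; the SQL mirrors what the connector logs.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:oracle:thin:@//oracle-host:1521/ORCLPDB", "DEBEZIUM", "***");
             Statement stmt = conn.createStatement()) {

            long endScn;
            try (ResultSet rs = stmt.executeQuery("SELECT CURRENT_SCN FROM V$DATABASE")) {
                rs.next();
                endScn = rs.getLong(1); // 289931599022 in the pass above
            }
            // The connector bumps its flush table with a fresh SCN so pending redo is
            // flushed before mining (LAST_SCN = 289931599023 in the log).
            stmt.executeUpdate("UPDATE LOG_MINING_FLUSH SET LAST_SCN = " + (endScn + 1));

            long startScn = 289931599008L; // offset SCN carried over from the previous pass
            // Same start_logmnr options as logged: dictionary from redo, DDL tracking,
            // no ROWID in reconstructed statements.
            try (CallableStatement start = conn.prepareCall(
                    "BEGIN sys.dbms_logmnr.start_logmnr("
                            + "startScn => ?, endScn => ?, "
                            + "OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + "
                            + "DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT); END;")) {
                start.setLong(1, startScn + 1);
                start.setLong(2, endScn);
                start.execute();
            }
            try (ResultSet rs = stmt.executeQuery(
                    "SELECT SCN, OPERATION, TABLE_NAME FROM V$LOGMNR_CONTENTS WHERE SCN <= " + endScn)) {
                while (rs.next()) {
                    System.out.printf("scn=%d op=%s table=%s%n",
                            rs.getLong(1), rs.getString(2), rs.getString(3));
                }
            }
            stmt.execute("BEGIN sys.dbms_logmnr.end_logmnr; END;");
        }
    }
}
```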
Offset SCN: 289931599008, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599016, txIds=[0b0014005a0e2f00]}}], Active Transactions: 0, Sleep: 3000 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:48,542 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:48,545 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:48,547 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:48,549 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:48,551 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:48,712 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:48,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:48,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=126, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=126, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:48,722 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=157, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,722 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,722 DEBUG 
|| [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,722 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,722 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=119) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,722 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,722 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,722 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 963 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,722 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=158, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=119, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,722 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,722 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 963 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
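The connector-consumer-vk_nau27_sink-0 client above is the sink task reading vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0, positioned at offset 2001 on broker3. Its committed progress can be inspected out-of-band with the Admin client; a short sketch follows (group id and broker taken from the log, everything else illustrative).

```java
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class SinkLagCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker3:29092"); // the partition leader seen in the log
        try (Admin admin = Admin.create(props)) {
            // Committed offsets of the sink task's consumer group; the fetch above shows
            // its live position at offset 2001.
            Map<TopicPartition, OffsetAndMetadata> committed = admin
                    .listConsumerGroupOffsets("connect-vk_nau27_sink")
                    .partitionsToOffsetAndMetadata()
                    .get();
            committed.forEach((tp, om) -> {
                if (om != null) {
                    System.out.printf("%s -> committed offset %d%n", tp, om.offset());
                }
            });
        }
    }
}
```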
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[... the two ChangeEventQueue DEBUG messages ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") repeat back-to-back several dozen more times, all stamped 2023-08-25 13:56:48,731, while the connector task idles between LogMiner batches; the repetitions are omitted here ...]
2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,732 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,731 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2005, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,732 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:48,732 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,732 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:48,732 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,732 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:48,732 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=664) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,732 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400644 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,732 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2008, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=664, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,732 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,732 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400644 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1997, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=663) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,782 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366470 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,782 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2000, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=663, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,783 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366469 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,817 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2006, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,817 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,817 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=666) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,817 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400559 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,817 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2009, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=666, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,817 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400559 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,819 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=674, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,819 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,819 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,819 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,819 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=668) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,819 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,819 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,819 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147399534 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,819 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=675, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=668, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,819 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,819 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147399534 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2007, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=667) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400534 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2010, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=667, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400534 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1998, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,876 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=663) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366376 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,877 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2001, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=663, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,877 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366375 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=1999, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,881 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=663) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:48,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366371 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:48,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2002, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=663, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:48,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147366370 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,222 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=158, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,223 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,223 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,223 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,223 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=120) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,223 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,223 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,223 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 462 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,223 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=159, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=120, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,223 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,223 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 462 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,232 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,232 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,232 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,232 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,232 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,232 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,232 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,232 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,232 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,232 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,232 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2008, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,233 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,233 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,233 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:49,233 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. 
Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:49,233 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,233 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,233 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=665) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,233 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400143 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,233 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2011, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=665, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,233 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 
TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,233 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400143 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2000, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
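Note on the fetch chatter above: these DEBUG/TRACE lines are routine. The worker's internal consumers (naument--offsets, naument--statuses, naument--configs) keep at most one incremental FETCH in flight per broker, so "Skipping fetch for partition ... because previous request ... has not been processed" only means a request to that broker is already outstanding, not that data is being dropped. The sketch below is a minimal stand-alone Java consumer, not the Connect worker's own KafkaBasedLog code, that reproduces the fetch parameters visible in FetchRequestData above (isolationLevel=0 i.e. read_uncommitted, maxWaitMs=500 -> fetch.max.wait.ms, minBytes=1 -> fetch.min.bytes, maxBytes=52428800 -> fetch.max.bytes). The broker addresses, group id, client id and topic name are taken from the log; the class name, deserializers and print loop are assumptions for illustration only.

import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchLoopSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // These map to the FetchRequestData fields seen in the log:
        // isolationLevel=0 -> read_uncommitted, maxWaitMs=500, minBytes=1, maxBytes=52428800.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // The partitions in the log (naument-offset-0..24) belong to this topic.
            consumer.subscribe(List.of("naument-offset"));
            while (true) {
                // Each poll() drives the fetcher: it builds one incremental FETCH per
                // broker and skips partitions whose previous request is still pending,
                // which is exactly what the TRACE "Skipping fetch" lines report.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    System.out.printf("%s-%d@%d%n", record.topic(), record.partition(), record.offset());
                }
            }
        }
    }
}

With logging raised to TRACE for org.apache.kafka.clients.consumer.internals, a consumer configured like this should emit the same "Added READ_UNCOMMITTED fetch request" / "Skipping fetch" pattern shown in this excerpt.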
2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=664) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365969 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,283 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2003, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=664, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,283 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,284 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,284 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,284 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,284 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,284 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,284 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,284 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,284 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365969 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2009, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=667) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400058 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2012, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=667, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,318 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400058 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,319 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=675, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,319 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,319 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,319 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,319 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=669) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,319 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,319 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,319 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147399034 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,319 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=676, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=669, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,320 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,320 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147399033 
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,342 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2010, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=668) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400034 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2013, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=668, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147400033 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2001, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=664) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365875 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,377 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2004, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=664, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,377 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,378 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365875 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2002, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=664) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365869 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2005, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=664, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365869 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,685 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:49,685 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:49,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=160, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header 
RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=160, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:49,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,723 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=159, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,723 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,723 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,723 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,723 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=121) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,723 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,723 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,723 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2962 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,723 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=161, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=121, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,723 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,723 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2962 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
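
Every FETCH request in this capture is sent with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), which are simply the Java consumer defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. Below is a minimal sketch of a standalone consumer that reproduces those wire-level values, assuming the broker addresses from the log are reachable; the group id and the topic subscription are illustrative only and are not taken from this worker's configuration.

// Sketch only: maps the FetchRequestData fields seen in the log to ordinary consumer settings.
// Group id and topic are illustrative; broker addresses are copied from the log and may be
// internal listeners that are only resolvable from the Connect network.
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.List;
import java.util.Properties;

public class FetchSettingsDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "fetch-settings-demo");            // illustrative
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");                   // -> maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                       // -> minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");                // -> maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");        // -> isolationLevel=0

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            System.out.printf("fetched %d records%n", records.count());
        }
    }
}
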
2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,733 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2011, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,733 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,733 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:49,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:49,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,734 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:49,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=666) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399643 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2014, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=666, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399642 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2003, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 
(id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=665) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365468 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,784 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2006, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=665, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,784 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,785 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365468 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2012, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=668) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399558 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,818 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2015, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=668, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,818 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399558 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,821 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=676, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,821 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,821 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,821 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,821 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=670) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,821 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,821 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,821 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147398532 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,821 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=677, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=670, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,821 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,821 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147398532 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2013, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=669) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399533 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2016, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=669, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399533 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2004, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,878 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=665) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365374 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,879 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2007, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=665, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,879 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365373 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2005, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=665) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,883 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365369 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:49,884 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2008, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=665, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:49,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147365368 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,224 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=161, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=122) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,225 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2460 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=162, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=122, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,225 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,225 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2460 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,233 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2014, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: 
null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=667) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399143 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2017, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=667, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399142 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
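The ChangeEventQueue pair above ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") is the vk_nau27 Oracle connector task idling between polls; this pair repeats continuously while the task has nothing queued. How long that loop sleeps and how large a batch it waits for are controlled by the connector's queue and batch properties. The sketch below shows just those knobs with illustrative values; it is not the actual vk_nau27 configuration, whose remaining settings are not visible in this log excerpt.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ChangeEventQueueTuningSketch {
        public static void main(String[] args) {
            // Subset of a Debezium Oracle connector config: only the knobs behind the
            // "no records available or batch size not reached yet" idle loop above.
            Map<String, String> config = new LinkedHashMap<>();
            config.put("connector.class", "io.debezium.connector.oracle.OracleConnector");
            config.put("max.queue.size", "8192");   // capacity of the internal ChangeEventQueue (illustrative value)
            config.put("max.batch.size", "2048");   // records the task tries to hand over per batch (illustrative value)
            config.put("poll.interval.ms", "500");  // roughly how long the task sleeps when nothing is queued (illustrative value)
            config.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }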
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,234 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,235 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,235 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:50,235 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:50,235 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,235 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2006, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
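The block above is normal idle behavior rather than an error: Debezium's ChangeEventQueue has no buffered change events, so WorkerSourceTask{id=vk_nau27_src-0} keeps polling and finds nothing to send, while the naument--offsets / naument--statuses / naument--configs consumers (Kafka Connect's readers of its internal topics) skip partitions that already have an in-flight fetch to the same broker. As a rough illustration of the source-task side of that loop, here is a minimal sketch of the org.apache.kafka.connect.source.SourceTask contract. This is not Debezium's actual implementation; the class name, package, and in-memory queue are made up, and it assumes the connect-api library is on the classpath. Returning null (or an empty list) from poll() is what leads the worker to log "About to send 0 records to Kafka" and "Nothing to send to Kafka. Polling source for additional records".

```java
// Hypothetical sketch only: shows the SourceTask polling contract that the
// worker log lines above reflect; it is NOT the Debezium Oracle connector.
package example;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

public class IdleAwareSourceTask extends SourceTask {

    // Stand-in for the connector's internal event queue: a background capture
    // thread would enqueue records here; the Connect worker thread drains it.
    private final BlockingQueue<SourceRecord> queue = new LinkedBlockingQueue<>();

    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
        // A real connector would start its capture thread here.
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        // Wait briefly for at least one record ("sleeping a bit" in the log).
        SourceRecord first = queue.poll(500, TimeUnit.MILLISECONDS);
        if (first == null) {
            // Nothing buffered: returning null tells the worker there is
            // nothing to send, so it simply polls the task again.
            return null;
        }
        List<SourceRecord> batch = new ArrayList<>();
        batch.add(first);
        queue.drainTo(batch); // also take whatever else is already buffered
        return batch;
    }

    @Override
    public void stop() {
        // A real connector would stop its capture thread here.
    }
}
```

On the consumer side, the repeated "Skipping fetch for partition ... because previous request to brokerN:29092 ... has not been processed" TRACE lines only mean that an incremental fetch covering that partition is already in flight to that broker. The maxWaitMs=500, minBytes=1, maxBytes=52428800 values visible in the FETCH requests correspond to the consumer's fetch.max.wait.ms, fetch.min.bytes, and fetch.max.bytes settings, here at their default values.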
2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=666) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364967 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,285 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2009, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=666, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,285 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364967 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,318 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2015, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=669) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399058 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,319 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2018, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=669, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,319 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399057 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,321 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=677, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,321 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,321 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,321 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,321 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=671) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,321 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,321 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,321 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147398032 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,321 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=678, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=671, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,322 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,322 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147398031 
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2016, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,343 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=670) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,343 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399033 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2019, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=670, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147399032 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2007, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=666) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364873 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,379 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2010, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=666, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,379 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364873 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2008, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=666) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364867 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2011, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=666, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364867 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,725 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=162, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,725 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,725 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,725 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,725 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=123) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,725 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,725 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,725 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1960 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,725 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=163, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=123, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,725 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,725 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1960 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2017, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=668) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398642 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2020, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=668, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
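The DEBUG/TRACE lines above come from the consumer-side incremental fetch machinery: for each broker the client either adds the partitions that broker leads to a new READ_UNCOMMITTED fetch, builds an incremental fetch for the existing session (sessionId/epoch) and sends the FETCH RPC, or skips partitions whose previous request to that broker is still in flight. As a rough illustration only, the sketch below is a standalone Java consumer configured with the same broker list, client/group ids, isolation level and fetch limits that appear in the FetchRequestData entries above (maxWaitMs=500, minBytes=1, maxBytes=52428800, request timeout 30000 ms). The class name, the subscribe() call and the byte-array deserializers are assumptions made for the example; this is not how the Connect worker actually builds its internal naument--offsets consumer.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class OffsetsTopicTail {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Values below are copied from the log lines purely for illustration.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
            // Matches isolationLevel=0 (READ_UNCOMMITTED) in the FETCH requests above.
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
            // Matches maxWaitMs=500, minBytes=1, maxBytes=52428800 in FetchRequestData.
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");
            // Matches the 30000 ms timeout on the FETCH request headers.
            props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                // Topic name inferred from the partition names naument-offset-0..24 above.
                consumer.subscribe(List.of("naument-offset"));
                while (true) {
                    // Each poll() drives the AbstractFetch/FetchSessionHandler code that emits the
                    // "Added ... fetch request", "Built incremental fetch" and
                    // "Skipping fetch ... has not been processed" messages seen in this log.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    for (ConsumerRecord<byte[], byte[]> r : records) {
                        System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset());
                    }
                }
            }
        }
    }

With TRACE enabled for org.apache.kafka.clients, every such poll() also produces the "Polling for fetches with timeout ..." lines interleaved above.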
2023-08-25 13:56:50,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398642 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:50,735 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:50,736 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:50,736 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:50,736 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:50,736 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:50,736 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
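The alternating "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." pairs show the source task draining an empty in-memory queue: the LogMiner side has produced nothing new, so WorkerSourceTask's poll returns 0 records and immediately polls again. Below is a minimal sketch of that hand-off using a plain BlockingQueue; the class, field names and timings are illustrative assumptions, not Debezium's actual ChangeEventQueue implementation.

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

/** Illustrative stand-in for the poll loop visible in the log; not Debezium's real ChangeEventQueue. */
public class PollLoopSketch {
    private final BlockingQueue<String> queue = new LinkedBlockingQueue<>(8192);
    private final int maxBatchSize = 2048;                        // analogous to max.batch.size
    private final Duration pollInterval = Duration.ofMillis(500); // analogous to poll.interval.ms

    /** Called repeatedly by the worker, much like WorkerSourceTask calling the task's poll(). */
    public List<String> poll() throws InterruptedException {
        List<String> batch = new ArrayList<>(maxBatchSize);
        // "checking for more records..."
        queue.drainTo(batch, maxBatchSize);
        if (batch.isEmpty()) {
            // "no records available or batch size not reached yet, sleeping a bit..."
            Thread.sleep(pollInterval.toMillis());
            queue.drainTo(batch, maxBatchSize);
        }
        // The worker then logs "About to send N records to Kafka"; N is 0 while the source is idle.
        return batch;
    }

    /** The streaming side (e.g. the LogMiner reader) enqueues change events as they arrive. */
    public void enqueue(String event) throws InterruptedException {
        queue.put(event);
    }
}

In the real connector the batch size and sleep interval come from max.batch.size and poll.interval.ms, which is why an idle connector logged at DEBUG produces this steady checking/sleeping chatter.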
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2009, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=667) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364466 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,786 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2012, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=667, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,786 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,787 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364466 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2018, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=670) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398557 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,819 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2021, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=670, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,819 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398557 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,822 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=678, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,822 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,822 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,822 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,822 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=672) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,822 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,822 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,822 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147397531 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,822 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=679, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=672, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,822 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,822 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147397531 
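The FETCH requests sent by the naument--offsets, naument--statuses and naument--configs consumers all carry maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0, i.e. the default fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and read_uncommitted isolation. The sketch below spells those settings out on a standalone consumer against the same brokers; the group id and topic are placeholders for illustration, not values taken from this worker's configuration.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;

/** Sketch of a consumer using the same fetch settings that appear in the FETCH requests above. */
public class FetchTuningSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-inspector");          // illustrative group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                     // minBytes=1 in the log
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);                 // maxWaitMs=500 in the log
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);              // maxBytes=52428800 in the log
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");    // isolationLevel=0 in the log

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status"));                       // illustrative topic choice
            consumer.poll(Duration.ofSeconds(1));  // issues the same kind of incremental FETCH requests
        }
    }
}

Raising fetch.min.bytes or fetch.max.wait.ms on such a consumer trades latency for fewer of the near-empty responses (responses=[]) seen above.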
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2019, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=671) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398532 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2022, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=671, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398532 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2010, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
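Each broker gets at most one outstanding fetch at a time, which is why every partition whose leader already has a request in flight is logged as "Skipping fetch ... because previous request ... has not been processed" until the matching response arrives and "Removing pending request for node ..." is logged. A conceptual sketch of that bookkeeping follows; it is not the actual AbstractFetch/FetchSessionHandler code, only an illustration of the pattern.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/** Conceptual sketch of "one in-flight fetch per node"; not Kafka's actual AbstractFetch implementation. */
public class FetchSchedulerSketch {
    private final Set<Integer> nodesWithPendingRequest = new HashSet<>();

    /** Groups fetchable partitions by leader node, skipping nodes that still have a request in flight. */
    public Map<Integer, List<String>> selectPartitionsToFetch(Map<String, Integer> partitionToLeader) {
        Map<Integer, List<String>> byNode = new HashMap<>();
        partitionToLeader.forEach((partition, node) -> {
            if (nodesWithPendingRequest.contains(node)) {
                System.out.printf(
                        "Skipping fetch for partition %s because previous request to node %d has not been processed%n",
                        partition, node);
            } else {
                byNode.computeIfAbsent(node, n -> new ArrayList<>()).add(partition);
            }
        });
        // One request per selected node goes on the wire ("Adding pending request for node ...").
        nodesWithPendingRequest.addAll(byNode.keySet());
        return byNode;
    }

    /** Called when the FETCH response arrives ("Removing pending request for node ..."). */
    public void onResponse(int node) {
        nodesWithPendingRequest.remove(node);
    }
}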
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=667) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364372 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,880 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2013, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=667, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,880 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,881 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364372 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2011, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
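At DEBUG and TRACE the Kafka client internals dominate this log. If the goal is to keep connector-level DEBUG while silencing the fetch chatter, the Connect worker's REST interface can adjust individual loggers at runtime via /admin/loggers (available in recent Kafka releases). The sketch below assumes the worker's REST listener is reachable on localhost:8083; that address does not appear in this excerpt and is only an example.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

/** Sketch: raise the level of the chatty client loggers through Connect's /admin/loggers endpoint. */
public class QuietClientLoggers {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String base = "http://localhost:8083/admin/loggers/";   // assumed worker REST address
        String[] loggers = {
                "org.apache.kafka.clients.consumer.internals.AbstractFetch",
                "org.apache.kafka.clients.NetworkClient",
                "io.debezium.connector.base.ChangeEventQueue"
        };
        for (String logger : loggers) {
            HttpRequest request = HttpRequest.newBuilder(URI.create(base + logger))
                    .header("Content-Type", "application/json")
                    .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                    .build();
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(logger + " -> " + response.statusCode());
        }
    }
}

The same effect can be made permanent in the worker's Log4j configuration instead of the REST call.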
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=667) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364367 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:50,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2014, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=667, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:50,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147364366 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=163, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=124) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,225 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1460 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,225 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=164, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=124, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,225 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,225 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1460 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2020, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=669) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398142 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,234 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2023, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=669, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
2023-08-25 13:56:51,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,234 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398142 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,236 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,236 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,237 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,237 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:51,237 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:51,237 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,237 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,287 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2012, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,287 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,287 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,287 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,287 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,287 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, 
currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} 
to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=668) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147363965 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,288 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2015, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=668, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,288 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147363964 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,320 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2021, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,320 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,320 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,320 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,320 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,320 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,320 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,320 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,320 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=671) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,320 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,320 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,320 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398056 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,320 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2024, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=671, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 
(id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398055 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,323 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=679, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,323 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,323 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,323 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,323 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=673) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,323 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,323 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,323 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147397030 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,323 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=680, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=673, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,323 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,323 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147397030 
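The naument--configs consumer above is reading the Connect config topic at FetchPosition{offset=2368} on naument-config-0. A quick way to check how far that reader sits from the end of the topic is to query the beginning and end offsets directly; the sketch below does that with a bare consumer (broker list and topic name taken from the log, everything else assumed).

    import java.util.Map;
    import java.util.Properties;
    import java.util.Set;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class ConfigTopicPositionCheck {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            TopicPartition configPartition = new TopicPartition("naument-config", 0);
            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.assign(Set.of(configPartition));
                Map<TopicPartition, Long> begin = consumer.beginningOffsets(Set.of(configPartition));
                Map<TopicPartition, Long> end = consumer.endOffsets(Set.of(configPartition));
                // The log shows the worker at FetchPosition{offset=2368}; the gap to the log-end
                // offset is how far behind the config reader is (normally zero or one).
                System.out.printf("naument-config-0: start=%d end=%d%n",
                        begin.get(configPartition), end.get(configPartition));
            }
        }
    }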
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2022, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=672) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398031 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2025, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=672, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147398031 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2013, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
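Every FetchPosition above also records the partition's current leader and epoch, for example LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}. If leaders look stale or fetches keep targeting the wrong broker, the same information can be pulled from the cluster with the admin client; a sketch, assuming a 3.x kafka-clients dependency:

    import java.util.Properties;
    import java.util.Set;
    import java.util.concurrent.ExecutionException;

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.TopicDescription;
    import org.apache.kafka.common.TopicPartitionInfo;

    public class LeaderCheck {
        public static void main(String[] args) throws ExecutionException, InterruptedException {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            try (Admin admin = Admin.create(props)) {
                TopicDescription desc = admin.describeTopics(Set.of("naument-offset"))
                        .allTopicNames().get().get("naument-offset");
                // Compare each partition's leader with the LeaderAndEpoch entries in the log.
                for (TopicPartitionInfo p : desc.partitions()) {
                    System.out.printf("partition=%d leader=%s replicas=%s isr=%s%n",
                            p.partition(), p.leader(), p.replicas(), p.isr());
                }
            }
        }
    }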
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,381 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=668) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,382 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147363871 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,382 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2016, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=668, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,382 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147363870 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2014, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
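Rather than leaving org.apache.kafka.clients at TRACE to watch these request/response round trips, the consumer exposes much of the same picture through its metrics registry. A sketch that polls once and then prints the fetch-manager metrics; the group and metric names such as consumer-fetch-manager-metrics and fetch-latency-avg are the ones current clients register, so treat them as an assumption for other versions.

    import java.time.Duration;
    import java.util.List;
    import java.util.Map;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.Metric;
    import org.apache.kafka.common.MetricName;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class FetchMetricsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-metrics-probe");   // assumed group name
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-status"));
                consumer.poll(Duration.ofSeconds(5));   // drive a few fetch round trips first
                for (Map.Entry<MetricName, ? extends Metric> e : consumer.metrics().entrySet()) {
                    MetricName name = e.getKey();
                    if ("consumer-fetch-manager-metrics".equals(name.group())
                            && (name.name().startsWith("fetch-") || name.name().endsWith("-consumed-rate"))) {
                        System.out.printf("%s = %s%n", name.name(), e.getValue().metricValue());
                    }
                }
            }
        }
    }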
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=668) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147363866 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,386 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2017, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=668, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,386 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147363866 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,551 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:51,552 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:51,552 TRACE Oracle|vk_nau27|streaming Current time 1692961011552 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:51,552 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:51,554 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 2800. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:51,554 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599051 as end SCN. 
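The Debezium streaming loop above keeps re-running 'SELECT SYSTIMESTAMP FROM DUAL' and 'SELECT CURRENT_SCN FROM V$DATABASE' to size the next LogMiner window. When the SCN appears stuck, it can help to run the same two statements over plain JDBC and watch whether CURRENT_SCN advances; a sketch with placeholder URL and credentials (the real connection details live in the connector configuration, and the Oracle JDBC driver must be on the classpath):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class ScnProbe {
        public static void main(String[] args) throws SQLException {
            // Placeholder URL and credentials; substitute the connector's actual values.
            String url = "jdbc:oracle:thin:@//oracle-host:1521/ORCLPDB";
            try (Connection conn = DriverManager.getConnection(url, "c##dbzuser", "dbz");
                 Statement stmt = conn.createStatement()) {
                // Same statements the streaming loop issues in the log above.
                try (ResultSet rs = stmt.executeQuery("SELECT SYSTIMESTAMP FROM DUAL")) {
                    if (rs.next()) System.out.println("database time: " + rs.getTimestamp(1));
                }
                try (ResultSet rs = stmt.executeQuery("SELECT CURRENT_SCN FROM V$DATABASE")) {
                    if (rs.next()) System.out.println("current SCN:   " + rs.getLong(1));
                }
            }
        }
    }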
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:51,554 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:51,555 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599052' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:51,560 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:51,562 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599016, endScn=289931599051, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:51,562 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599017', endScn => '289931599051', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:51,571 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599016, 289931599051] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,606 DEBUG || WorkerSourceTask{id=vk_nau27_src-0} Committing offsets [org.apache.kafka.connect.runtime.SourceTaskOffsetCommitter] 2023-08-25 13:56:51,606 DEBUG || WorkerSourceTask{id=vk_nau27_src-0} Committing offsets [org.apache.kafka.connect.runtime.WorkerSourceTask] 2023-08-25 13:56:51,606 INFO || WorkerSourceTask{id=vk_nau27_src-0} Committing offsets for 2 acknowledged messages [org.apache.kafka.connect.runtime.WorkerSourceTask] 2023-08-25 13:56:51,606 DEBUG || WorkerSourceTask{id=vk_nau27_src-0} There are currently no pending messages for this offset commit; all messages dispatched to the task's producer since the last commit have been acknowledged [org.apache.kafka.connect.runtime.WorkerSourceTask] 2023-08-25 13:56:51,606 DEBUG || Submitting 1 entries to backing store. 
2023-08-25 13:56:51,606 DEBUG || Submitting 1 entries to backing store. The offsets are: {{server=vk_nau27}={commit_scn=289931578364:1:0c000100a6562b00, transaction_id=null, snapshot_scn=289931548407, scn=289931575536}} [org.apache.kafka.connect.storage.OffsetStorageWriter]
2023-08-25 13:56:51,607 TRACE || [Producer clientId=naument--offsets] Attempting to append record ProducerRecord(topic=naument-offset, partition=null, headers=RecordHeaders(headers = [], isReadOnly = false), key=[B@19ddea85, value=[B@4a6d5614, timestamp=null) with callback org.apache.kafka.connect.storage.KafkaOffsetBackingStore$SetCallbackFuture@66a04640 to topic naument-offset partition 1 [org.apache.kafka.clients.producer.KafkaProducer]
2023-08-25 13:56:51,607 TRACE || [Producer clientId=naument--offsets] Allocating a new 16384 byte message buffer for topic naument-offset partition 1 with remaining timeout 60000ms [org.apache.kafka.clients.producer.internals.RecordAccumulator]
2023-08-25 13:56:51,607 TRACE || [Producer clientId=naument--offsets] Waking up the sender since topic naument-offset partition 1 is either full or getting a new batch [org.apache.kafka.clients.producer.KafkaProducer]
2023-08-25 13:56:51,607 TRACE || [Producer clientId=naument--offsets] No load stats for topic naument-offset, not using adaptive [org.apache.kafka.clients.producer.internals.BuiltInPartitioner]
2023-08-25 13:56:51,607 TRACE || [Producer clientId=naument--offsets] Nodes with data ready to send: [broker3:29092 (id: 3 rack: null)] [org.apache.kafka.clients.producer.internals.Sender]
2023-08-25 13:56:51,607 DEBUG || [Producer clientId=naument--offsets] Sending PRODUCE request with header RequestHeader(apiKey=PRODUCE, apiVersion=9, clientId=naument--offsets, correlationId=7, headerVersion=2) and timeout 30000 to node 3: {acks=-1,timeout=30000,partitionSizes=[naument-offset-1=229]} [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:51,607 TRACE || [Producer clientId=naument--offsets] Sent produce request to 3: (type=ProduceRequest, acks=-1, timeout=30000, partitionRecords=([PartitionProduceData(index=1, records=MemoryRecords(size=229, buffer=java.nio.HeapByteBuffer[pos=0 lim=229 cap=229]))]), transactionalId='' [org.apache.kafka.clients.producer.internals.Sender]
2023-08-25 13:56:51,608 TRACE || [Producer clientId=naument--offsets] No load stats for topic naument-offset, not using adaptive [org.apache.kafka.clients.producer.internals.BuiltInPartitioner]
2023-08-25 13:56:51,608 DEBUG || [Producer clientId=naument--offsets] Received PRODUCE response from node 3 for request with header RequestHeader(apiKey=PRODUCE, apiVersion=9, clientId=naument--offsets, correlationId=7, headerVersion=2): ProduceResponseData(responses=[TopicProduceResponse(name='naument-offset', partitionResponses=[PartitionProduceResponse(index=1, errorCode=0, baseOffset=7, logAppendTimeMs=-1, logStartOffset=0, recordErrors=[], errorMessage=null)])], throttleTimeMs=0) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:51,608 TRACE || [Producer clientId=naument--offsets] Received produce response from node 3 with correlation id 7 [org.apache.kafka.clients.producer.internals.Sender]
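The OffsetStorageWriter entries above show the task's source offsets for server=vk_nau27 (scn, commit_scn, snapshot_scn) being appended to the worker's offsets topic naument-offset, partition 1, and acknowledged at base offset 7. A small stand-alone consumer can be used to inspect what was written there; the sketch below is an assumption-laden illustration, not part of the worker: it assumes the internal topics use the usual JSON converter (so keys and values print as JSON text) and that the broker list taken from this log is reachable.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    // Sketch: dump the Connect offsets topic seen in the PRODUCE entries above.
    public class DumpConnectOffsets {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092"); // brokers from the log
            props.put("group.id", "offset-topic-inspector");   // throwaway group, not used by Connect
            props.put("enable.auto.commit", "false");
            props.put("auto.offset.reset", "earliest");
            props.put("key.deserializer", StringDeserializer.class.getName());
            props.put("value.deserializer", StringDeserializer.class.getName());
            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-offset"));
                for (ConsumerRecord<String, String> rec : consumer.poll(Duration.ofSeconds(5))) {
                    // With the default JSON converter the key is roughly ["vk_nau27_src",{"server":"vk_nau27"}]
                    // and the value carries commit_scn / snapshot_scn / scn, matching the map logged above.
                    System.out.printf("p%d@%d %s = %s%n", rec.partition(), rec.offset(), rec.key(), rec.value());
                }
            }
        }
    }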
2023-08-25 13:56:51,608 TRACE || Successfully produced messages to naument-offset-1 with base offset 7. [org.apache.kafka.clients.producer.internals.ProducerBatch]
2023-08-25 13:56:51,608 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Finished flushing offsets to storage [org.apache.kafka.connect.runtime.WorkerSourceTask]
2023-08-25 13:56:51,608 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2015, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[FetchableTopicResponse(topic='', topicId=BTm6uZ6rR_KkKR-jkrsw3A, partitions=[PartitionData(partitionIndex=1, errorCode=0, highWatermark=8, lastStableOffset=8, logStartOffset=0, divergingEpoch=EpochEndOffset(epoch=-1, endOffset=-1), currentLeader=LeaderIdAndEpoch(leaderId=-1, leaderEpoch=-1), snapshotId=SnapshotId(endOffset=-1, epoch=-1), abortedTransactions=null, preferredReadReplica=-1, records=MemoryRecords(size=229, buffer=java.nio.HeapByteBuffer[pos=0 lim=229 cap=232]))])]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:51,608 TRACE || [Producer clientId=naument--offsets] No load stats for topic naument-offset, not using adaptive [org.apache.kafka.clients.producer.internals.BuiltInPartitioner]
2023-08-25 13:56:51,608 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(naument-offset-1), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:51,608 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Fetch READ_UNCOMMITTED at offset 7 for partition naument-offset-1 returned fetch data PartitionData(partitionIndex=1, errorCode=0, highWatermark=8, lastStableOffset=8, logStartOffset=0, divergingEpoch=EpochEndOffset(epoch=-1, endOffset=-1), currentLeader=LeaderIdAndEpoch(leaderId=-1, leaderEpoch=-1), snapshotId=SnapshotId(endOffset=-1, epoch=-1), abortedTransactions=null, preferredReadReplica=-1, records=MemoryRecords(size=229, buffer=java.nio.HeapByteBuffer[pos=0 lim=229 cap=232])) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:51,608 DEBUG || WorkerSourceTask{id=vk_nau27_src-0} Finished commitOffsets successfully in 2 ms [org.apache.kafka.connect.runtime.WorkerSourceTask]
2023-08-25 13:56:51,608 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:51,608 DEBUG || Committing offset '{server=vk_nau27}' for partition '{commit_scn=289931578364:1:0c000100a6562b00, transaction_id=null, snapshot_scn=289931548407, scn=289931575536}' [io.debezium.connector.common.BaseSourceTask]
2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Preparing to read 229 bytes of data for partition naument-offset-1 with offset FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Updating high watermark for partition naument-offset-1 to 8 [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:51,608 TRACE || [Consumer
clientId=naument--offsets, groupId=naument] Updating log start offset for partition naument-offset-1 to 0 [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Updating last stable offset for partition naument-offset-1 to 8 [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Returning 1 fetched records at offset FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} for assigned partition naument-offset-1 [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Updating fetch position from FetchPosition{offset=7, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} for partition naument-offset-1 and returning 1 records from `poll()` [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, 
groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,608 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=669) for node 3. 
Added (), altered (BTm6uZ6rR_KkKR-jkrsw3A:naument-offset-1), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,609 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(naument-offset-1), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2018, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=669, topics=[FetchTopic(topic='naument-offset', topicId=BTm6uZ6rR_KkKR-jkrsw3A, partitions=[FetchPartition(partition=1, currentLeaderEpoch=68, fetchOffset=8, lastFetchedEpoch=-1, logStartOffset=-1, partitionMaxBytes=104857600)])], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,609 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147483647 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599024, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:48Z, transactionId='05000300e06c2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb46.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Transaction 05000300e06c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599027, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:48Z, transactionId='12001500e6472a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb4c.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Transaction 12001500e6472a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599032, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:50Z, transactionId='07000500a60b2d00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb63.00c4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Transaction 07000500a60b2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931599034) LogMinerEventRow{scn=289931599035, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:50Z, transactionId='09000c004aea2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb74.005c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Transaction 09000c004aea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599036, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:50Z, transactionId='04001a0006d22a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb74.0114, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Transaction 04001a0006d22a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599039, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:50Z, transactionId='0d000c006ad32900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb84.006c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Transaction 0d000c006ad32900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599043, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:50Z, transactionId='10000900af342a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bb93.001c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Transaction 10000900af342a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599046, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:50Z, transactionId='0c000100b0562b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bba2.01dc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming Transaction 0c000100b0562b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:56:51,663 DEBUG Oracle|vk_nau27|streaming Counters{rows=31, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=8, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
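The repeated "Skipping change associated with table ... which does not match filters" entries above are LogMiner rows for tables (the QRTZ_* scheduler tables, TBL_SYS_TASK_DATE) that fall outside the connector's include list, which is why this mining window closes with commitCount=8 but dmlCount=0. The sketch below shows the kind of filter configuration that produces this behaviour; the property names are standard Debezium Oracle connector options, but the values are illustrative assumptions and not the actual vk_nau27_src configuration, which is not visible in this log.

    import java.util.Properties;

    // Illustrative filter settings only; the real connector config is not shown in this log.
    class Vk27SourceFilterSketch {
        static Properties filters() {
            Properties p = new Properties();
            p.put("connector.class", "io.debezium.connector.oracle.OracleConnector");
            p.put("topic.prefix", "vk_nau27");                  // matches server=vk_nau27 in the offsets above
            // Only tables matching these patterns produce change events; everything else is
            // logged at TRACE as "does not match filters" and dropped.
            p.put("schema.include.list", "DEBEZIUM");
            p.put("table.include.list", "DEBEZIUM\\.GBC_TBL_SERVICECALL_NC27");
            p.put("log.mining.strategy", "redo_log_catalog");   // CATALOG_IN_REDO in the session trace
            p.put("log.mining.sleep.time.max.ms", "3000");      // "Max sleep time 3000" earlier in the log
            return p;
        }
    }

The single included table corresponds to the sink topic vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27 that the sink consumer fetches from elsewhere in this log.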
2023-08-25 13:56:51,663 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 1663. Offset SCN: 289931599016, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599046, txIds=[0c000100b0562b00]}}], Active Transactions: 0, Sleep: 2800 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:56:51,663 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:51,665 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:51,667 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:51,669 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:56:51,671 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource]
2023-08-25 13:56:51,712 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat]
2023-08-25 13:56:51,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:56:51,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=127, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:51,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=127, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:51,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:56:51,726 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=164, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:51,726 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
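Right after closing each mining window the connector samples its own session memory, which is what the four v$statname/v$mystat TRACE queries above are, and then reports "Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB)". The same statistic can be pulled ad hoc with the query quoted in the log; the JDBC wrapper below is only a sketch and assumes SELECT access on V$STATNAME and V$MYSTAT.

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    // Sketch: same per-session statistic lookup as the TRACE'd queries above (UGA/PGA, current and max).
    class SessionMemorySketch {
        static long sessionStat(Connection conn, String statName) throws SQLException {
            String sql = "SELECT VALUE FROM v$statname n, v$mystat m "
                       + "WHERE n.name = ? AND n.statistic# = m.statistic#";
            try (PreparedStatement ps = conn.prepareStatement(sql)) {
                ps.setString(1, statName);   // e.g. "session uga memory", "session pga memory max"
                try (ResultSet rs = ps.executeQuery()) {
                    return rs.next() ? rs.getLong(1) : -1L;   // value is in bytes
                }
            }
        }
    }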
2023-08-25 13:56:51,726 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:51,726 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:51,726 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=125) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:51,726 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:51,726 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:51,726 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 959 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:51,726 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=165, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=125, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:51,726 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:51,726 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 959 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:51,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2023, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:51,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session
861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=670) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397642 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,734 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2026, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=670, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,734 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397642 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,737 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,738 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,738 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:51,738 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:51,738 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,738 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:51,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2024, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=672) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397555 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,821 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2027, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=672, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,821 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397555 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,823 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=680, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,823 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,823 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,823 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,823 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=674) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,823 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,823 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,823 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147396530 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,823 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=681, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=674, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,824 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,824 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147396529 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2025, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=673) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397531 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,846 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2028, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=673, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397530 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2016, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=669) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147483374 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,882 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2019, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=669, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,882 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147483374 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2017, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=669) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:51,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147483369 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:51,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2020, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=669, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:51,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147483369 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2018, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, 
offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because 
previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED 
fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=670) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147483147 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,109 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2021, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=670, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,109 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because 
previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,110 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147483147 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,227 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=165, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,227 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,227 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:56:52,227 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,227 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=126) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,227 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,227 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,227 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 458 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,227 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=166, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=126, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,227 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,227 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 458 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,235 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2026, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,235 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,235 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending 
request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=671) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,235 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397141 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,235 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2029, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=671, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,235 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,235 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397141 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,238 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,239 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,239 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:52,239 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:52,239 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,239 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
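The DEBUG chatter above (originally the same two messages repeated many times within the same millisecond, condensed here) is Debezium's ChangeEventQueue idling: the Oracle source task keeps checking its in-memory queue, finds nothing or less than a full batch, sleeps briefly and retries, while the Connect WorkerSourceTask polls the task and gets back 0 records. Below is a rough, hypothetical sketch of that batching poll pattern, with a plain BlockingQueue standing in for the real ChangeEventQueue; the class and field names are illustrative only, and the numbers merely echo Debezium's documented max.queue.size, max.batch.size and poll.interval.ms connector options (treat these defaults as assumptions for your connector version).

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch only -- not Debezium's actual implementation.
// Mirrors the "checking for more records..." / "no records available or batch
// size not reached yet, sleeping a bit..." / "polling records..." messages above.
public class BatchingPollLoop<T> {
    private final BlockingQueue<T> queue = new LinkedBlockingQueue<>(8192); // cf. max.queue.size
    private final int maxBatchSize = 2048;                                  // cf. max.batch.size
    private final long pollIntervalMs = 500L;                               // cf. poll.interval.ms

    // Called repeatedly by the worker thread, much like SourceTask.poll();
    // an empty batch is what the worker then logs as "About to send 0 records to Kafka".
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.currentTimeMillis() + pollIntervalMs;
        while (batch.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
            T first = queue.poll(50, TimeUnit.MILLISECONDS);   // "checking for more records..."
            if (first == null) {
                continue;                                      // "no records available ... sleeping a bit..."
            }
            batch.add(first);
            queue.drainTo(batch, maxBatchSize - batch.size());
        }
        return batch;
    }
}

If this idle chatter floods the worker log, raising io.debezium.connector.base.ChangeEventQueue (and the org.apache.kafka.clients loggers further down) from DEBUG/TRACE back to INFO in the Connect logging configuration is usually enough; the loop itself is normal whenever the source database has no new changes to stream.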
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:52,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2027, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=673) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397055 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,321 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2030, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=673, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,321 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397055 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,324 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=681, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,324 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,324 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,324 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,324 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=675) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,324 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,324 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,324 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147396029 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,324 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=682, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=675, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,324 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,324 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147396029 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2028, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=674) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397030 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2031, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=674, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147397030 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2019, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=670) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482873 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,383 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2022, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=670, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,383 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,384 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482873 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2020, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=670) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482869 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2023, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=670, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482868 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,610 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2021, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,610 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,610 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,610 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,610 TRACE || [Consumer 
2023-08-25 13:56:52,610 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
[… equivalent "Skipping fetch" TRACE entries at 13:56:52,610 cover partitions naument-offset-11, -15, -2, -12, -14, -20, -24, -22, -21, -4, -3, -0, -6, -19, -10 and -16, whose previous requests to broker1 and broker2:29092 have not been processed …]
2023-08-25 13:56:52,610 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
[… matching DEBUG entries add READ_UNCOMMITTED fetch requests for naument-offset-5 (offset=2, offsetEpoch=Optional[54]), naument-offset-13 (offset=25, Optional[54]), naument-offset-17 (offset=44, Optional[60]), naument-offset-8 (offset=12, Optional[60]), naument-offset-18 (offset=122, Optional[60]) and naument-offset-1 (offset=8, Optional[68]), all targeting node broker3:29092 (id: 3 rack: null) …]
2023-08-25 13:56:52,610 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=671) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:52,610 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,610 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,611 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482646 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:52,611 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2024, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=671, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
[… at 13:56:52,611, with the new request to broker3 now in flight, the naument--offsets consumer again logs "Skipping fetch" TRACE entries for partitions naument-offset-9, -7, -11, -15, -23, -2, -12, -14, -20, -24, -22, -21, -4, -3, -0, -6, -5, -19, -10, -16, -13, -17, -8, -18 and -1 because the previous requests to broker1, broker2 and broker3:29092 have not been processed …]
2023-08-25 13:56:52,611 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482645 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:52,686 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat]
2023-08-25 13:56:52,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator]
2023-08-25 13:56:52,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=167, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:52,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,686 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:52,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=167, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:52,686 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator]
[… the same skip/poll pair for vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 repeats at 13:56:52,686 (timeout 3000) and 13:56:52,687 (timeout 2999) while the request to broker3 is still outstanding …]
2023-08-25 13:56:52,728 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=166, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:52,728 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:52,728 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,728 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
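Editor's note: the HEARTBEAT exchange above ("3000ms remaining on timer", generation 1, coordinator id 2147483646) is driven by the sink consumer's heartbeat and session settings. The sketch below lists the relevant consumer properties with illustrative values only; the actual values for connector-consumer-vk_nau27_sink-0 are not visible in this log.

    import java.util.Properties;

    public class HeartbeatSettingsSketch {
        public static void main(String[] args) {
            // Illustrative values, not read from the vk_nau27_sink connector's configuration.
            Properties props = new Properties();
            props.put("heartbeat.interval.ms", "3000");   // how often the background thread sends HEARTBEAT
            props.put("session.timeout.ms", "45000");     // coordinator evicts the member if no heartbeat arrives in this window
            props.put("max.poll.interval.ms", "300000");  // max gap between poll() calls before a rebalance is triggered
            // For a Connect sink connector these can usually be overridden per connector with the
            // "consumer.override." prefix (e.g. consumer.override.session.timeout.ms), provided the
            // worker's connector.client.config.override.policy allows it.
            props.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }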
2023-08-25 13:56:52,728 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=127) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:52,728 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,728 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,728 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2958 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:52,728 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=168, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=127, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:52,728 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,728 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2958 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:52,736 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2029, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:52,736 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:52,736 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,736 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,736 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
[… interleaved TRACE entries at 13:56:52,736 skip the fetch for naument-status-3 and naument-status-2 (pending request to broker1:29092) and naument-status-0 (pending request to broker2:29092) …]
2023-08-25 13:56:52,736 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=672) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:52,736 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,736 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,736 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396640 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:52,736 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2032, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=672, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
[… another round of "Skipping fetch" TRACE entries at 13:56:52,736 covers naument-status-4, -3, -2, -1 and -0, whose previous requests to broker1, broker2 and broker3:29092 are still in flight …]
2023-08-25 13:56:52,736 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396640 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:52,739 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:52,739 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[… this pair of ChangeEventQueue DEBUG entries repeats verbatim, all stamped 13:56:52,739, dozens more times while the Debezium Oracle task checks its empty internal queue …]
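Editor's note: the "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." loop above is the Debezium source task waiting for its internal ChangeEventQueue to fill. A rough sketch of the connector settings that govern that loop follows; the values are assumptions for illustration, not the ones vk_nau27_src is actually running with.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ChangeEventQueueSettingsSketch {
        public static void main(String[] args) {
            // Illustrative subset of a Debezium Oracle connector configuration; values are assumptions.
            Map<String, String> config = new LinkedHashMap<>();
            config.put("connector.class", "io.debezium.connector.oracle.OracleConnector");
            config.put("max.queue.size", "8192");   // capacity of the internal ChangeEventQueue
            config.put("max.batch.size", "2048");   // records handed to Connect per poll once available
            config.put("poll.interval.ms", "500");  // how long the "sleeping a bit" pause lasts between checks
            config.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }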
2023-08-25 13:56:52,740 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:52,740 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:52,740 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:52,740 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:52,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2030, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:52,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:52,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:52,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
[… interleaved TRACE entries at 13:56:52,822 skip the fetch for naument-status-4 and naument-status-1 (pending request to broker3:29092) and naument-status-0 (pending request to broker2:29092) …]
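Editor's note: "Nothing to send to Kafka. Polling source for additional records" is the Connect worker looping on SourceTask.poll() when the task returns no data. The minimal task below is a hypothetical illustration of that contract only (the class name, topic, and offset keys are invented); it is not Debezium's implementation.

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.source.SourceRecord;
    import org.apache.kafka.connect.source.SourceTask;

    public class NoOpSourceTaskSketch extends SourceTask {
        @Override
        public String version() {
            return "0.0.1";
        }

        @Override
        public void start(Map<String, String> props) {
            // Normally: open connections, restore offsets via context.offsetStorageReader(), etc.
        }

        @Override
        public List<SourceRecord> poll() throws InterruptedException {
            // Returning null (or an empty list) tells the worker there is nothing to send yet;
            // the worker simply calls poll() again, which produces log lines like the ones above.
            boolean nothingAvailable = true; // placeholder for a real "queue empty" check
            if (nothingAvailable) {
                Thread.sleep(100); // avoid a tight loop on the worker thread
                return null;
            }
            Map<String, ?> sourcePartition = Collections.singletonMap("source", "demo");
            Map<String, ?> sourceOffset = Collections.singletonMap("position", 0L);
            return Collections.singletonList(
                    new SourceRecord(sourcePartition, sourceOffset, "demo-topic", Schema.STRING_SCHEMA, "value"));
        }

        @Override
        public void stop() {
            // Release resources here.
        }
    }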
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=674) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396554 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,822 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2033, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=674, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,822 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396554 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,825 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=682, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, 
responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,825 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,825 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,825 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,825 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=676) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,825 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,825 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,825 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147395528 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,825 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=683, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=676, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,825 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,825 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147395528 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2031, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) 
[org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=675) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396529 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2034, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=675, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396529 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,884 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2022, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,884 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,884 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,884 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,884 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,884 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,884 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,884 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=671) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482372 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,885 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2025, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=671, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,885 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482371 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2023, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=671) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482368 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:52,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2026, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=671, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:52,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482368 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,111 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2024, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,111 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,111 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,111 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,111 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,111 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,111 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,111 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=672) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482145 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2027, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=672, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
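The clients named naument--offsets, naument--statuses and naument--configs in the entries above are the Connect worker's readers of its internal offset, status and config topics (naument-offset-*, naument-status-* and naument-config-0). Every FETCH they issue carries maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), which are simply the Kafka consumer defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. The sketch below only illustrates that mapping and is not how the worker actually builds these clients; the bootstrap list and group id are copied from the log, everything else is assumed.

    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    // Sketch only: a consumer configured with the same fetch parameters that the
    // worker's internal clients log above (maxWaitMs=500, minBytes=1,
    // maxBytes=52428800, isolationLevel=0 -> read_uncommitted).
    public class FetchSettingsSketch {

        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");            // maxWaitMs in the FETCH request
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                // minBytes
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");         // maxBytes (50 MiB)
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            // The worker's internal clients read naument-offset-*, naument-status-*
            // and naument-config-0; this sketch only shows where the fetch fields
            // in the request header come from.
            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                // no-op
            }
        }
    }
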
2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147482144 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,229 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=168, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,229 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,229 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,229 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,229 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=128) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,229 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,229 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,229 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2457 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,230 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=169, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=128, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,230 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,230 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2456 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,237 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2032, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,237 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,237 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,237 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,237 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,237 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,237 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,237 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,237 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=673) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,237 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,237 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,237 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396139 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,237 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2035, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=673, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396138 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue]
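The alternating "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." entries above come from Debezium's ChangeEventQueue: the Oracle streaming thread pushes change events into an in-memory queue, the task's poll() drains it, and while the queue is empty or the batch threshold has not been reached the poller sleeps briefly and checks again, so bursts of these lines within the same millisecond are normal idling rather than a fault. The loop is governed by the connector's queue and batch settings; the sketch below lists them with commonly documented Debezium defaults, which are assumptions here and not values read from this log or from the vk_nau27_src configuration.

    import java.util.Properties;

    // Sketch only: the queue/batch properties behind the ChangeEventQueue loop
    // logged above. The values are commonly documented Debezium defaults and are
    // assumptions here; check the actual vk_nau27_src connector config before
    // relying on them.
    public final class ChangeEventQueueTuning {

        static Properties queueSettings() {
            Properties p = new Properties();
            p.setProperty("max.batch.size", "2048");   // max change events handed to the task per batch
            p.setProperty("max.queue.size", "8192");   // max events buffered between streamer and poll()
            p.setProperty("poll.interval.ms", "500");  // how long the poller waits when nothing is queued
            return p;
        }

        private ChangeEventQueueTuning() {
        }
    }

Raising max.batch.size and max.queue.size only matters under sustained load; it would not change anything in an idle stretch like the one above.
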
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,240 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,241 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,241 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:53,241 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:53,241 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,241 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2033, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping 
fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=675) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396053 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,323 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2036, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=675, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,323 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396053 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,326 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, 
clientId=naument--configs, correlationId=683, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,326 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,326 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,326 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,326 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=677) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,326 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,326 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,326 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147395027 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,326 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=684, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=677, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,326 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,326 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147395027 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,348 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2034, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,348 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] 
Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,348 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=676) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,348 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396028 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,348 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2037, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=676, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,348 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147396028 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2025, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=672) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481871 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,385 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2028, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=672, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,385 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481871 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2026, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=672) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481867 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2029, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=672, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481867 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2027, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=673) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481644 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,612 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2030, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=673, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
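Editor's note: the DEBUG/TRACE lines above are the Connect worker's three internal consumers (naument--configs, naument--offsets, naument--statuses) and a sink connector's consumer (connector-consumer-vk_nau27_sink-0) idling. Each poll builds an incremental fetch session (same sessionId, sessionEpoch advancing by one, empty toSend/toForget because the partition set has not changed, the session's existing partitions listed as "implied"), the brokers answer with responses=[] meaning no new records, and the repeated "Skipping fetch ... previous request ... has not been processed" lines are the normal per-broker gating that keeps at most one fetch in flight per node. The FetchRequestData fields correspond to standard consumer settings; the sketch below is illustrative only, not the worker's actual configuration, and assumes the broker1/broker2/broker3 hostnames from the log are resolvable from where it runs. The property values are copied from the request shown in the log (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0); the group id is a placeholder.

    // Minimal sketch: a consumer whose FETCH requests would carry the same
    // parameters as those logged above. All values except the group id are
    // taken from the FetchRequestData fields in this log.
    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class FetchTuningExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                      "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group"); // placeholder
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            // fetch.max.wait.ms -> maxWaitMs in the FETCH request (500 ms, the default)
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
            // fetch.min.bytes -> minBytes (1 byte: the broker replies as soon as any data exists)
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
            // fetch.max.bytes -> maxBytes (52428800 = 50 MiB, the default)
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
            // isolation.level -> isolationLevel=0 (READ_UNCOMMITTED, the default)
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                // Topic name taken from the sink connector's consumer in this log.
                consumer.subscribe(Collections.singletonList("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
                // poll() drives the same AbstractFetch / FetchSessionHandler machinery
                // that produced the incremental-fetch log lines above.
                consumer.poll(Duration.ofMillis(500));
            }
        }
    }

If this volume of client logging is not wanted, the org.apache.kafka.clients loggers can normally be raised back to INFO in the worker's log4j configuration, or, on recent Connect versions, at runtime through the worker REST API (PUT /admin/loggers/org.apache.kafka.clients with a body such as {"level": "INFO"}).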
2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,612 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481644 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=169, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=129) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,730 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1956 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=170, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=129, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,730 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,730 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1956 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2035, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=674) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395638 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2038, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=674, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395638 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,741 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,741 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:53,742 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,742 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:53,742 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:53,742 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,742 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:53,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2036, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=676) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395552 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,824 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2039, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=676, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,824 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395552 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,826 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=684, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,826 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,826 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,826 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,826 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=678) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,826 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,826 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,826 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147394527 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,826 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=685, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=678, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,826 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,826 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147394527 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2037, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,848 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,848 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,848 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,848 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=677) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,848 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395528 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2040, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=677, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395527 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2028, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=673) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481370 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,886 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2031, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=673, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,886 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,887 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481370 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,889 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2029, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,889 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,889 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,889 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,889 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=673) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481367 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:53,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2032, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=673, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:53,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481366 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2030, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=674) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481144 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,112 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2033, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=674, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,112 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147481144 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,230 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=170, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,230 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,230 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,230 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,230 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=130) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,230 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,230 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,230 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1456 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,230 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=171, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=130, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,230 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,230 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1456 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,238 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2038, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,238 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,238 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=675) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,238 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395138 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,238 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2041, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=675, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,238 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395138 [org.apache.kafka.clients.consumer.KafkaConsumer]
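The FETCH requests logged above are all built with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (read_uncommitted), and each internal consumer (naument--offsets, naument--statuses, connector-consumer-vk_nau27_sink-0) drives that machinery from a plain poll loop. As a minimal sketch, assuming you want to reproduce the same fetch behaviour outside Connect, a standalone consumer can be configured with the same values; the broker addresses and topic name are copied from the log, while the group id and offset-reset policy below are placeholders, not settings used by this cluster.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchProbe {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list as it appears in the log; adjust if the advertised listeners differ.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "fetch-probe");             // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // Same fetch parameters as the FetchRequestData entries above.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);              // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                  // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);           // maxBytes=52428800
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");       // placeholder policy

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Topic name taken from the vk_nau27 sink consumer entries above.
            consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
            while (true) {
                // Each poll() exercises the AbstractFetch/FetchSessionHandler code that emits the
                // "Added ... fetch request", "Built incremental fetch" and
                // "Skipping fetch ... has not been processed" messages seen in this log.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    System.out.printf("partition=%d offset=%d%n", record.partition(), record.offset());
                }
            }
        }
    }
}
```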
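The ChangeEventQueue DEBUG pair above is the source task idling: it checks its in-memory queue for change events, finds nothing (or less than a full batch), sleeps briefly, and checks again until the poll timeout expires. The sketch below is a minimal, hypothetical illustration of that poll-and-sleep pattern in plain Java; it is not Debezium's actual ChangeEventQueue, and the class and parameter names (IdleQueuePoller, maxBatchSize, pollInterval) are invented for the example, loosely mirroring the connector's max.batch.size and poll.interval.ms settings.

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Illustrative sketch only (not Debezium's ChangeEventQueue): mimics the
// "checking for more records..." / "no records available or batch size not
// reached yet, sleeping a bit..." poll-and-sleep loop seen in the log.
public class IdleQueuePoller<T> {

    private final BlockingQueue<T> queue = new LinkedBlockingQueue<>();
    private final int maxBatchSize;       // assumed stand-in for max.batch.size
    private final Duration pollInterval;  // assumed stand-in for poll.interval.ms

    public IdleQueuePoller(int maxBatchSize, Duration pollInterval) {
        this.maxBatchSize = maxBatchSize;
        this.pollInterval = pollInterval;
    }

    // Drain up to maxBatchSize records; while the batch is not full and the
    // timeout has not expired, check the queue and sleep briefly between checks.
    public List<T> poll(Duration timeout) throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.nanoTime() + timeout.toNanos();
        while (batch.size() < maxBatchSize && System.nanoTime() < deadline) {
            // "checking for more records..."
            queue.drainTo(batch, maxBatchSize - batch.size());
            if (batch.size() < maxBatchSize) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                Thread.sleep(pollInterval.toMillis());
            }
        }
        return batch;
    }

    public void enqueue(T record) {
        queue.add(record);
    }

    public static void main(String[] args) throws InterruptedException {
        IdleQueuePoller<String> poller = new IdleQueuePoller<>(2048, Duration.ofMillis(500));
        poller.enqueue("change-event");
        // With one queued record and a short timeout, poll() returns a partial batch.
        System.out.println(poller.poll(Duration.ofSeconds(1)));
    }
}

In this sketch, each check that comes back empty corresponds to one such checking/no-records pair in the log, which is why an idle connector emits them continuously at DEBUG level.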
2023-08-25 13:56:54,243 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,243 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:54,243 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:54,243 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,243 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2039, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=677) for
node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395051 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,325 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2042, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=677, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,325 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395051 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,327 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=685, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,327 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for 
session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,327 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,327 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,327 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=679) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,327 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,327 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,327 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147394026 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,327 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=686, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=679, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,327 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,327 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147394026 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2040, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=678) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395027 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2043, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=678, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147395027 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2031, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=674) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,387 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480869 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,388 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2034, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=674, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,388 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480868 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2032, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=674) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480866 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2035, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=674, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,391 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480866 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,471 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:54,472 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:54,472 TRACE Oracle|vk_nau27|streaming Current time 1692961014472 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:54,472 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:54,474 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 3000. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:54,474 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599063 as end SCN. 
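The streaming entries just above show the Debezium Oracle connector probing the database clock (to compute the lag figure) and the current SCN (used as the end of the next mining window). A minimal JDBC sketch of those two probes follows; the connection URL and credentials are placeholders, not values taken from this log.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Sketch of the two probes logged by the streaming loop above: the database
// clock and the current SCN. URL and credentials are placeholders.
public class ScnProbe {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:oracle:thin:@//db-host:1521/ORCLPDB"; // placeholder
        try (Connection conn = DriverManager.getConnection(url, "DEBEZIUM", "secret");
             Statement stmt = conn.createStatement()) {
            try (ResultSet rs = stmt.executeQuery("SELECT SYSTIMESTAMP FROM DUAL")) {
                if (rs.next()) {
                    System.out.println("database time: " + rs.getTimestamp(1));
                }
            }
            try (ResultSet rs = stmt.executeQuery("SELECT CURRENT_SCN FROM V$DATABASE")) {
                if (rs.next()) {
                    System.out.println("current SCN:   " + rs.getLong(1));
                }
            }
        }
    }
}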
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:54,474 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:54,475 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599064' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:54,480 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:54,481 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599046, endScn=289931599063, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:54,481 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599047', endScn => '289931599063', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:54,490 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599046, 289931599063] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:54,567 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:54,567 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599053, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:51Z, transactionId='03001d00a7b52800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bba6.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:54,567 TRACE Oracle|vk_nau27|streaming Transaction 03001d00a7b52800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:54,567 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599056, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:51Z, transactionId='0600060031072a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bbac.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:54,567 TRACE Oracle|vk_nau27|streaming Transaction 0600060031072a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:54,567 DEBUG Oracle|vk_nau27|streaming Counters{rows=5, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=2, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:54,567 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 3567. 
Offset SCN: 289931599046, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599056, txIds=[0600060031072a00]}}], Active Transactions: 0, Sleep: 3000 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:54,567 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:54,569 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:54,571 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:54,573 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:54,575 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2033, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
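The mining pass recorded a few entries above flushes the latest SCN to the LOG_MINING_FLUSH table, checks the current redo SEQUENCE#, starts a LogMiner session over the SCN window with DICT_FROM_REDO_LOGS + DDL_DICT_TRACKING + NO_ROWID_IN_STMT, and then fetches the mined rows. The following is a hand-rolled illustration of such a window, not the connector's actual code, and the V$LOGMNR_CONTENTS query is deliberately simplified compared to the one Debezium issues.

import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

// Open a LogMiner window over [startScn, endScn], read back the mined rows,
// then close the session. Simplified sketch only.
public class LogMinerWindow {
    static void mine(Connection conn, long startScn, long endScn) throws Exception {
        String startLogMnr =
            "BEGIN sys.dbms_logmnr.start_logmnr(" +
            " startScn => ?, endScn => ?," +
            " OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS" +
            " + DBMS_LOGMNR.DDL_DICT_TRACKING" +
            " + DBMS_LOGMNR.NO_ROWID_IN_STMT); END;";
        try (CallableStatement start = conn.prepareCall(startLogMnr)) {
            start.setLong(1, startScn);
            start.setLong(2, endScn);
            start.execute();
        }
        String contents =
            "SELECT scn, operation, seg_owner, table_name, sql_redo " +
            "FROM V$LOGMNR_CONTENTS WHERE scn > ? AND scn <= ?";
        try (PreparedStatement ps = conn.prepareStatement(contents)) {
            ps.setLong(1, startScn);
            ps.setLong(2, endScn);
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    System.out.printf("%d %s %s.%s%n",
                        rs.getLong("scn"), rs.getString("operation"),
                        rs.getString("seg_owner"), rs.getString("table_name"));
                }
            }
        }
        // End the LogMiner session once the window has been processed.
        try (CallableStatement end = conn.prepareCall("BEGIN sys.dbms_logmnr.end_logmnr; END;")) {
            end.execute();
        }
    }
}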
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 DEBUG 
|| [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=675) for node 3. 
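The FETCH traffic in these entries comes from the consumer's incremental fetch sessions, and the request fields map onto ordinary client settings: maxWaitMs=500 is fetch.max.wait.ms, minBytes=1 is fetch.min.bytes, maxBytes=52428800 is fetch.max.bytes, and the READ_UNCOMMITTED isolation level is isolation.level. A small stand-alone consumer configured with the same values is sketched below; the broker list, group id and topic are placeholders, and this is not how Connect instantiates its internal consumers.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Consumer settings corresponding to the FETCH request parameters visible above.
public class FetchTuningExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
        props.put("group.id", "example-group");                  // placeholder
        props.put("key.deserializer",
                  "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer",
                  "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("fetch.max.wait.ms", "500");      // maxWaitMs in the request
        props.put("fetch.min.bytes", "1");          // minBytes
        props.put("fetch.max.bytes", "52428800");   // maxBytes (50 MiB)
        props.put("isolation.level", "read_uncommitted");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("example-topic"));        // placeholder topic
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
            System.out.println("fetched " + records.count() + " records");
        }
    }
}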
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480643 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,613 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2036, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=675, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,613 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480643 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,712 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:54,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:54,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=128, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=128, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:54,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=171, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=131) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,730 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 956 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,730 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=172, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=131, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,730 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,730 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 956 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header 
RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2041, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=676) for node 3. 
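The naument--offsets and naument--statuses consumers in these entries read the Connect worker's internal offset and status topics. Judging by the partition numbers (naument-offset-0 through 24, naument-status-0 through 4), those topics appear to be named naument-offset and naument-status, though the names are inferred from the partitions rather than stated in the log. One way to confirm their layout is the Admin API; the broker addresses below are placeholders.

import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.TopicDescription;

// Describe the (inferred) Connect internal topics and print their partition counts.
public class DescribeInternalTopics {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
        try (Admin admin = Admin.create(props)) {
            Map<String, TopicDescription> topics = admin
                .describeTopics(List.of("naument-offset", "naument-status"))
                .allTopicNames().get();
            topics.forEach((name, description) ->
                System.out.println(name + ": " + description.partitions().size() + " partitions"));
        }
    }
}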
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394638 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,738 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2044, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=676, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,738 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394638 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
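The repeated ChangeEventQueue messages are the connector task polling its bounded in-memory queue: when no records are available, or the batch is not yet full, it waits briefly and checks again. In Debezium this behaviour is governed by max.queue.size, max.batch.size and poll.interval.ms. The sketch below is not Debezium's implementation, only the shape of the loop those messages describe, with sizes set to the usual defaults.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Drain a bounded queue into a batch; if nothing arrives within the wait
// interval, give up on this pass and return whatever has accumulated.
public class PollLoopSketch {
    // Sizes mirror common Debezium defaults (max.queue.size / max.batch.size).
    private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(8192);
    private final int maxBatchSize = 2048;

    List<String> poll() throws InterruptedException {
        List<String> batch = new ArrayList<>();
        while (batch.size() < maxBatchSize) {
            // Wait up to 500 ms (cf. poll.interval.ms) for the next record.
            String record = queue.poll(500, TimeUnit.MILLISECONDS);
            if (record == null) {
                break; // "no records available or batch size not reached yet"
            }
            batch.add(record);
        }
        return batch;
    }
}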
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,743 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,744 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:54,744 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:54,744 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:54,744 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
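Note: the ChangeEventQueue entries above trace Debezium's source poll loop: the worker task polls the change-event queue, and while no records are buffered and the batch size has not been reached it sleeps briefly before checking again, which is why a poll can still hand 0 records to Kafka. A minimal sketch of that pattern follows (illustrative only, not Debezium's actual implementation; the class name PollLoopSketch and the maxBatchSize/maxWaitMs/pollIntervalMs values are assumptions):

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;

    // Sketch of the poll/sleep cycle behind the "checking for more records..." /
    // "no records available or batch size not reached yet, sleeping a bit..." lines.
    public class PollLoopSketch {
        private final Queue<String> queue = new ArrayDeque<>();
        private final int maxBatchSize = 2048;   // assumed batch limit
        private final long maxWaitMs = 1000;     // assumed overall poll timeout
        private final long pollIntervalMs = 500; // assumed "sleep a bit" interval

        // Streaming side: enqueue a captured change event and wake the poller.
        public synchronized void enqueue(String record) {
            queue.add(record);
            notifyAll();
        }

        // Worker side: wait for records, sleeping in short intervals until the
        // timeout expires; an empty batch corresponds to "About to send 0 records to Kafka".
        public synchronized List<String> poll() throws InterruptedException {
            long deadline = System.currentTimeMillis() + maxWaitMs;
            while (queue.isEmpty() && System.currentTimeMillis() < deadline) {
                wait(pollIntervalMs); // "sleeping a bit..." then check again
            }
            List<String> batch = new ArrayList<>();
            while (!queue.isEmpty() && batch.size() < maxBatchSize) {
                batch.add(queue.remove());
            }
            return batch;
        }
    }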
2023-08-25 13:56:54,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2042, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:54,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:54,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:54,826 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:54,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:54,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:54,826 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:54,826 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:54,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=678) for node 1.
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,826 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,826 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394550 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,827 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2045, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=678, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,827 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394549 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,827 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=686, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,828 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,828 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,828 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,828 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=680) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,828 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,828 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,828 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147393525 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,828 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=687, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=680, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,828 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,828 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147393525 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2043, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=679) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394526 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2046, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=679, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394526 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,887 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2034, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=675) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480368 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,888 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2037, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=675, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,888 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480368 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2035, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=675) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480364 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:54,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2038, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=675, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:54,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480364 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,094 TRACE || com.mchange.v2.async.ThreadPoolAsynchronousRunner$DeadlockDetector@26cb9b7a -- Running DeadlockDetector[Exiting. No pending tasks.] 
[com.mchange.v2.async.ThreadPoolAsynchronousRunner] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2036, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=676) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,114 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480142 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,114 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2039, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=676, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,115 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147480141 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,231 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=172, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,231 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,231 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,231 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,231 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=132) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,231 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,231 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,231 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 455 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,231 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=173, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=132, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,231 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,231 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 455 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,239 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2044, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,239 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,239 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=677) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,239 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394137 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,239 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2047, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=677, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,239 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
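The TRACE/DEBUG lines above all come from the same consumer fetch cycle: each offsets/status/sink consumer keeps at most one in-flight FETCH per broker, logs a "Skipping fetch" line for every partition whose leader node still has a pending request, and, once a response arrives, rebuilds an incremental fetch request for that node (same sessionId, epoch incremented). The request fields visible in the "Sending FETCH request" entries match the consumer defaults (maxWaitMs=500 is fetch.max.wait.ms, minBytes=1 is fetch.min.bytes, maxBytes=52428800 is fetch.max.bytes). The Java sketch below is a simplified, illustrative model of that per-node bookkeeping only; it is not the actual AbstractFetch/FetchSessionHandler code, and the class and method names in it are invented:

import java.util.*;

/** Simplified model of the per-node fetch bookkeeping described by the TRACE/DEBUG lines above. */
class FetchBookkeeping {
    // Nodes with an unanswered FETCH request ("Adding/Removing pending request for node ...").
    private final Set<Integer> nodesWithPendingRequest = new HashSet<>();
    // Incremental fetch session epoch per node ("Built incremental fetch (sessionId=..., epoch=N)").
    private final Map<Integer, Integer> sessionEpoch = new HashMap<>();

    /** Partitions whose leader has no in-flight request are fetchable; the rest are "skipped". */
    List<String> selectFetchablePartitions(Map<String, Integer> partitionToLeaderNode) {
        List<String> fetchable = new ArrayList<>();
        for (Map.Entry<String, Integer> e : partitionToLeaderNode.entrySet()) {
            if (nodesWithPendingRequest.contains(e.getValue())) {
                // "Skipping fetch for partition X because previous request to node N has not been processed"
                continue;
            }
            fetchable.add(e.getKey());
        }
        return fetchable;
    }

    /** Called when a FETCH request is sent to a node: mark it pending and advance the session epoch. */
    int onFetchSent(int nodeId) {
        nodesWithPendingRequest.add(nodeId);
        return sessionEpoch.merge(nodeId, 1, Integer::sum); // next epoch of the incremental fetch session
    }

    /** Called when the FETCH response arrives: the node becomes eligible for the next request. */
    void onFetchResponse(int nodeId) {
        nodesWithPendingRequest.remove(nodeId); // "Removing pending request for node ..."
    }
}

With org.apache.kafka.clients.consumer.internals.AbstractFetch logging at TRACE, every skipped partition produces its own line on every poll, which is why the same partition list repeats for each broker several times per second in this log.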
2023-08-25 13:56:55,239 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,239 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394137 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:55,244 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[the two DEBUG entries above repeat verbatim many more times, all timestamped 13:56:55,244]
2023-08-25 13:56:55,245 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:55,245 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:55,245 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:55,245 DEBUG Oracle|vk_nau27|oracle-connector-task polling records...
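The burst above is the Debezium source task idling: WorkerSourceTask keeps polling the task, and ChangeEventQueue reports that nothing is buffered and the batch threshold has not been reached, so it sleeps briefly and checks again. The cadence of that loop is governed by the connector's queue and batch settings. Below is a minimal sketch of the relevant Debezium Oracle connector properties, assuming the connector name vk_nau27_src seen in this log; the property names are standard Debezium options, but the values are illustrative and not taken from the real configuration.

import java.util.LinkedHashMap;
import java.util.Map;

public class Nau27SourceQueueTuning {
    public static void main(String[] args) {
        // Illustrative subset of a Debezium Oracle source connector config.
        // Property names are documented Debezium options; the values are examples only.
        Map<String, String> config = new LinkedHashMap<>();
        config.put("name", "vk_nau27_src"); // connector whose task appears in this log
        config.put("connector.class", "io.debezium.connector.oracle.OracleConnector");
        config.put("poll.interval.ms", "500"); // roughly, how long the queue waits before "checking for more records" again
        config.put("max.batch.size", "2048");  // batch threshold behind "batch size not reached yet"
        config.put("max.queue.size", "8192");  // upper bound on buffered change events
        config.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}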
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,245 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2045, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=679) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394049 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,327 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2048, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=679, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,327 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394049 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,328 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=687, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,328 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,328 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,328 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,328 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=681) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,328 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,328 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,328 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147393025 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,328 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=688, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=681, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,328 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,328 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147393025 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2046, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
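The FETCH traffic above comes from the worker's internal consumers (clientId naument--statuses and naument--configs). The fields in the logged FetchRequestData map onto ordinary consumer fetch settings: maxWaitMs=500 corresponds to fetch.max.wait.ms, minBytes=1 to fetch.min.bytes, and maxBytes=52428800 to fetch.max.bytes. The standalone consumer below (group id fetch-tuning-demo is hypothetical) only illustrates those knobs; the worker builds its internal consumers itself.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchTuningExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "fetch-tuning-demo"); // hypothetical group, for illustration
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // These map onto the fields visible in the logged FetchRequestData:
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");    // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");        // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800"); // maxBytes=52428800 (50 MiB)
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status")); // status topic seen in this log
            consumer.poll(Duration.ofSeconds(1));          // issues FETCH requests like the ones above
        }
    }
}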
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=680) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394026 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2049, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=680, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147394026 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2037, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=676) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,389 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479867 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,389 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2040, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=676, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
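The repeated "Skipping fetch for partition ... because previous request to ... has not been processed" TRACE lines reflect the fetcher keeping at most one in-flight fetch per broker: while a node has a pending request, every partition it leads is skipped, and the node becomes eligible again once "Removing pending request for node ..." is logged. The toy sketch below models only that bookkeeping, assuming nothing about the real AbstractFetch internals beyond what these messages state; the class and method names are invented for illustration.

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/** Toy model of "one in-flight fetch per node": partitions whose leader
 *  already has a pending request are skipped, as in the TRACE lines above. */
public class PendingFetchTracker {
    private final Set<Integer> nodesWithPendingFetch = ConcurrentHashMap.newKeySet();

    /** Returns true if a fetch may be sent to this partition's leader node right now. */
    public boolean tryPrepareFetch(String partition, int leaderNodeId) {
        if (!nodesWithPendingFetch.add(leaderNodeId)) {
            System.out.printf(
                "Skipping fetch for partition %s because previous request to node %d has not been processed%n",
                partition, leaderNodeId);
            return false;
        }
        return true;
    }

    /** Called when the node's fetch response arrives ("Removing pending request for node ..."). */
    public void onResponse(int leaderNodeId) {
        nodesWithPendingFetch.remove(leaderNodeId);
    }

    public static void main(String[] args) {
        PendingFetchTracker tracker = new PendingFetchTracker();
        Map<String, Integer> leaders = Map.of("naument-offset-7", 1, "naument-offset-11", 1, "naument-offset-9", 3);
        leaders.forEach(tracker::tryPrepareFetch); // one of the two node-1 partitions gets skipped
        tracker.onResponse(1);                     // node 1 becomes eligible for the next fetch
    }
}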
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479867 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2038, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=676) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479864 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2041, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=676, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479863 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2039, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=677) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,615 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479641 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,615 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2042, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=677, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,616 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479640 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,687 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:55,687 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:55,687 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=174, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,687 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition 
vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,687 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,688 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=174, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,688 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:55,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,732 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=173, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,732 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,732 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,732 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,732 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=133) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,732 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,732 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,732 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2955 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,732 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=175, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=133, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,732 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,732 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2955 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,740 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2047, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,740 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,740 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: 
null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=678) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,740 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393636 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,740 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2050, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=678, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,740 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,740 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393636 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,745 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,745 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,745 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,746 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:55,746 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:55,746 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,746 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:55,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2048, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=680) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393548 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,828 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2051, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=680, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,828 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393548 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,829 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=688, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,829 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,829 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,829 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,829 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=682) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,829 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,829 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,829 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147392524 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,829 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=689, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=682, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,829 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,829 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147392524 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2049, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
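The FetchRequestData fields in the requests above (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, request timeout 30000) are the stock consumer fetch settings surfacing in the wire request. For reference, they map to the following consumer configuration keys; the values shown are the client defaults that the log implies are in effect:

    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;

    // Mapping of the FETCH request fields above to consumer configuration keys.
    public class FetchTuningSketch {
        public static Properties fetchDefaults() {
            Properties props = new Properties();
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");            // maxWaitMs=500
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                // minBytes=1
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");         // maxBytes=52428800 (50 MiB)
            props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");         // "and timeout 30000"
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0, READ_UNCOMMITTED
            return props;
        }
    }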
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=681) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393526 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2052, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=681, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393526 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,889 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2040, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,889 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,889 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=677) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479367 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,890 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2043, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=677, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
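The "Built incremental fetch (sessionId=..., epoch=...) ... Added (), altered (), removed (), replaced () out of (...)" lines and the IncrementalFetchRequest with toSend=() are the consumer's fetch-session bookkeeping (KIP-227): once a session exists with a broker, only partitions whose fetch state changed are re-sent, and unchanged partitions are implied by the sessionId/epoch. A rough, simplified illustration of that idea only; this is not the actual FetchSessionHandler logic:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Toy model of an incremental fetch session: the client remembers what the broker
    // already knows and only transmits partitions whose fetch offset changed.
    public class FetchSessionSketch {
        private final Map<String, Long> sessionPartitions = new HashMap<>(); // partition -> last sent offset
        private int epoch = 0;

        public Set<String> buildNextRequest(Map<String, Long> desired) {
            Set<String> toSend = new HashSet<>();
            for (Map.Entry<String, Long> e : desired.entrySet()) {
                Long previous = sessionPartitions.put(e.getKey(), e.getValue());
                if (previous == null || !previous.equals(e.getValue())) {
                    toSend.add(e.getKey()); // "added" or "altered"; unchanged partitions stay implied
                }
            }
            sessionPartitions.keySet().retainAll(desired.keySet()); // dropped partitions leave the session
            epoch++;                                                // each request advances the session epoch
            return toSend; // an empty set corresponds to the "toSend=(), implied=(...)" requests in the log
        }
    }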
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,890 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479366 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2041, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=677) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479363 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:55,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2044, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=677, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,894 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:55,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479363 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2042, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
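The naument-offset-* partitions being fetched above belong to the Connect worker's offset storage topic, which the naument--offsets consumer tails continuously. If the stored source offsets ever need to be inspected by hand, a throwaway consumer reading that topic from the beginning is enough. A sketch, assuming the usual JSON serialization of Connect's internal topics (keys as [connector, sourcePartition] arrays, values as offset maps) and using a disposable group id so the worker's own consumers are not disturbed:

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    // Throwaway reader for the offset storage topic seen in the log ("naument-offset").
    public class OffsetTopicDump {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "offset-topic-dump"); // hypothetical one-off group
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-offset"));
                for (int i = 0; i < 10; i++) { // a few short polls are enough for a small topic
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                    for (ConsumerRecord<String, String> r : records) {
                        System.out.printf("%s-%d@%d key=%s value=%s%n",
                                r.topic(), r.partition(), r.offset(), r.key(), r.value());
                    }
                }
            }
        }
    }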
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=678) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479140 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,116 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2045, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=678, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,116 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,117 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147479140 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,215 TRACE || [Producer clientId=connector-producer-vk_nau27_src-0] The number of partitions is too small: available=1, all=1, not using adaptive for topic vk_nau27 [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:56:56,215 TRACE || [Producer clientId=connector-producer-vk_nau27_src-0] The number of partitions is too small: available=1, all=1, not using adaptive for topic vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27 [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:56:56,233 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=175, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,233 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) 
[org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,233 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,233 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,233 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=134) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,233 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,233 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,233 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2454 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,233 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=176, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=134, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,233 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,233 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2454 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,241 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2050, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,241 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] 
Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,241 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=679) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,241 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393135 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,241 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2053, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=679, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,241 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,241 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393135 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,246 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,246 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,246 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,246 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,246 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,246 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,246 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,246 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,247 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,247 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:56,247 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:56,247 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,247 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,329 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2051, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,329 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,329 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=681) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,329 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393047 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,329 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2054, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=681, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,329 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393047 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,330 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=689, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,330 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,330 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,330 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,330 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=683) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,330 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,330 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,330 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147392023 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,330 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=690, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=683, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,330 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,330 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147392023 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with 
header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2052, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=682) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393025 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2055, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=682, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147393025 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2043, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=678) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478866 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,390 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2046, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=678, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,390 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478866 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2044, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=678) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478863 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,393 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2047, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=678, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,393 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478863 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2045, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=679) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478639 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,617 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2048, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=679, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
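
The FETCH traffic above follows Kafka's incremental fetch-session protocol (KIP-227): each consumer keeps one session per broker (sessionId/sessionEpoch), sends an empty topics=[] list when nothing about its assignment changed, and the broker serves the partitions already implied by the session. The repeated "Skipping fetch for partition ... because previous request ... has not been processed" TRACE lines only mean the consumer allows a single in-flight FETCH per broker node and waits for that response before issuing the next request; they are not errors. The parameters visible in FetchRequestData (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0) correspond to the consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level=read_uncommitted. Below is a minimal sketch of a standalone consumer configured with those same values; the group id and topic are placeholders for illustration, not this worker's actual configuration.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class FetchTuningSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Broker list as it appears in the log; adjust for your environment.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            // Placeholder group id and deserializers -- purely for illustration.
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "fetch-tuning-demo");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            // Same values as FetchRequestData in the log: maxWaitMs=500, minBytes=1, maxBytes=52428800.
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
            // isolationLevel=0 in the request corresponds to read_uncommitted.
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("demo-topic")); // hypothetical topic name
                while (true) {
                    // Each poll() drives the fetch logic seen above: at most one in-flight
                    // FETCH per broker, with incremental sessions reused via sessionId/epoch.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
                }
            }
        }
    }

With fetch.min.bytes=1 and fetch.max.wait.ms=500, a broker that has no new data returns an empty incremental response (responses=[]) within roughly half a second, which is why the log shows a steady cadence of empty FETCH responses rather than a problem with the consumers.
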
2023-08-25 13:56:56,617 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,618 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478639 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,734 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=176, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,734 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,734 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,734 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,734 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=135) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,734 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,734 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,734 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1953 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,734 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=177, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=135, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,734 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,734 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1953 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,742 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2053, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,742 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,742 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=680) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,742 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392634 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,742 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2056, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=680, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,742 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
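Every FETCH request logged here carries the same tuning: maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0, i.e. READ_UNCOMMITTED. Those fields map directly to the consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. As a minimal sketch only, assuming the broker list visible in the log and a hypothetical standalone group id (this is not the Connect-internal naument--statuses consumer), a consumer reproducing these fetch parameters could be configured like this:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class StatusTopicTailer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker addresses as they appear in the log lines above.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        // Hypothetical group id for this illustration; NOT the Connect worker group.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-log-inspection");
        // Fetch tuning matching FetchRequestData(maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0).
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Topic name taken from the partitions naument-status-0..4 seen in the log.
            consumer.subscribe(List.of("naument-status"));
            while (true) {
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> r : records) {
                    System.out.printf("%s-%d@%d (%d bytes)%n",
                            r.topic(), r.partition(), r.offset(),
                            r.value() == null ? 0 : r.value().length);
                }
            }
        }
    }
}
```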
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,742 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392634 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,747 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,748 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
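The repeated DEBUG pairs above ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") come from the Debezium change-event queue: the task drains an in-memory queue and, while it is empty and the batch threshold has not been reached, it briefly sleeps and retries. A rough sketch of that loop, using made-up constants in place of the connector's actual max.queue.size, max.batch.size and poll.interval.ms values (which are not shown in this log) and not Debezium's real ChangeEventQueue code, could look like:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

/** Simplified illustration of a bounded-queue poll loop with a batch threshold. */
public class PollLoopSketch {
    private final BlockingQueue<Object> queue = new ArrayBlockingQueue<>(8192); // stand-in for max.queue.size
    private final int maxBatchSize = 2048;                                      // stand-in for max.batch.size

    public List<Object> poll() throws InterruptedException {
        List<Object> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(500); // stand-in for poll.interval.ms
        while (batch.size() < maxBatchSize && System.nanoTime() < deadline) {
            // "checking for more records..."
            Object record = queue.poll(50, TimeUnit.MILLISECONDS);
            if (record == null) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                continue;
            }
            batch.add(record);
        }
        return batch;
    }
}
```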
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,748 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:56,748 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:56,748 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,748 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:56,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2054, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=682) for node 1. 
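The WorkerSourceTask trace above ("About to send 0 records to Kafka" / "Nothing to send to Kafka. Polling source for additional records") is the worker reacting to a source task whose poll() produced nothing to send; it simply loops and polls the task again. A minimal sketch of a Connect source task exhibiting that behaviour (hypothetical class and helper, not the Debezium Oracle task) might be:

```java
import java.util.List;
import java.util.Map;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

public class IdleAwareSourceTask extends SourceTask {
    @Override
    public String version() { return "0.0.1"; }

    @Override
    public void start(Map<String, String> props) {
        // initialise connections, resume offsets, etc.
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        // Returning null (or an empty batch) gives the worker nothing to send,
        // so it polls this task again, as seen in the trace above.
        return fetchBatchOrNull();
    }

    private List<SourceRecord> fetchBatchOrNull() {
        return null; // placeholder: no records available in this sketch
    }

    @Override
    public void stop() { }
}
```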
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392547 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,829 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2057, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=682, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,829 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392547 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,830 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=690, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,830 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,830 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,830 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,830 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=684) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,830 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,830 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,830 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147391523 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,831 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=691, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=684, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,831 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,831 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147391522 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2055, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=683) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392525 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2058, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=683, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392524 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:56:56,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2046, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 DEBUG || [Consumer 
clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous 
request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,891 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=679) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478365 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,892 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2049, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=679, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
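Throughout these lines the fetcher alternates "Adding pending request for node ...", "Removing pending request for node ..." and "Skipping fetch for partition ... because previous request to ... has not been processed": at most one FETCH is kept in flight per broker, and partitions led by a broker with an outstanding request are skipped until its response has been handled. A simplified, purely illustrative version of that bookkeeping (not the actual AbstractFetch implementation) is:

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/** Sketch of "at most one in-flight fetch per broker" bookkeeping. */
public class PendingFetchTracker {
    private final Set<Integer> nodesWithPendingFetch = ConcurrentHashMap.newKeySet();

    /** True if a FETCH may be sent to this node now ("Adding pending request for node ..."). */
    public boolean tryMarkPending(int nodeId) {
        return nodesWithPendingFetch.add(nodeId);
    }

    /** Called when the FETCH response arrives ("Removing pending request for node ..."). */
    public void clearPending(int nodeId) {
        nodesWithPendingFetch.remove(nodeId);
    }

    /** Mirrors "Skipping fetch for partition ... because previous request ... has not been processed". */
    public boolean shouldSkip(int leaderNodeId) {
        return nodesWithPendingFetch.contains(leaderNodeId);
    }
}
```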
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,892 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478364 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2047, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=679) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478362 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:56,894 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2050, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=679, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:56,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478362 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2048, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=680) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,118 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478138 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,118 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2051, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=680, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,119 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147478137 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,235 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=177, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,235 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,235 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,235 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,235 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=136) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,235 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,235 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,235 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1452 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,235 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=178, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=136, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,235 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,235 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1452 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,243 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2056, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,243 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,243 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,243 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,243 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,243 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,243 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,243 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,243 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=681) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,243 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,243 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,243 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392133 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,243 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2059, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=681, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,243 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,243 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:57,243 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:57,243 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:57,243 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:57,243 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392133 [org.apache.kafka.clients.consumer.KafkaConsumer]
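The clients producing the fetch traffic above are ordinary Java KafkaConsumer instances: naument--offsets and naument--statuses appear to be Kafka Connect's internal consumers for its offsets and statuses topics, and connector-consumer-vk_nau27_sink-0 is evidently the sink task's consumer. The TRACE "Skipping fetch for partition ... because previous request ... has not been processed" entries show the fetcher declining to send a new FETCH to a broker that already has one in flight, and the DEBUG "Built incremental fetch (sessionId=..., epoch=...)" entries show it reusing an incremental fetch session per node, so an unchanged partition set is carried as implied=(...) instead of being resent in full. The values visible in FetchRequestData (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, i.e. READ_UNCOMMITTED) correspond to the consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level, here at their defaults. If the TRACE volume is unwanted, the org.apache.kafka.clients loggers can usually be turned down at runtime through Connect's /admin/loggers REST endpoint or in the log4j configuration. A minimal sketch of a consumer configured this way (broker list and topic taken from the log; everything else is illustrative and is not Connect's own code):

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class FetchLoopSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Broker list as it appears in the log.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
            props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            // Defaults that show up verbatim in the FETCH requests above.
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-offset"));
                while (true) {
                    // Each poll() drives the fetcher that emits the TRACE/DEBUG lines above:
                    // at most one in-flight FETCH per broker, one incremental session per node.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    for (ConsumerRecord<byte[], byte[]> r : records) {
                        System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset());
                    }
                }
            }
        }
    }

Every call to poll() produces exactly the fetch/skip cycle logged above; the incremental-session bookkeeping (sessionId, epoch, implied partitions) is handled entirely inside the client.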
2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
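The checking/sleeping pair above is the idle-poll heartbeat of Debezium's ChangeEventQueue, and the FETCH traffic that follows is ordinary Kafka consumer polling; none of it indicates a fault, but at DEBUG/TRACE it dominates the worker log. A minimal sketch of how these loggers could be raised back to INFO, assuming the stock log4j 1.x connect-log4j.properties layout shipped with the Kafka Connect distribution (file name and appender names may differ in other images):

    # connect-log4j.properties (log4j 1.x) - illustrative excerpt, adjust to the actual file in use
    log4j.rootLogger=INFO, stdout
    # quiet the per-poll DEBUG pair from the Debezium change event queue
    log4j.logger.io.debezium.connector.base.ChangeEventQueue=INFO
    # quiet the per-fetch DEBUG/TRACE chatter from the Kafka consumer internals
    log4j.logger.org.apache.kafka.clients=INFO
    # quiet the Jetty idle-timeout checks on the Connect REST port
    log4j.logger.org.eclipse.jetty.io.IdleTimeout=INFO

On Connect versions that expose the admin loggers endpoint, the same change can be applied at runtime without a worker restart, for example: curl -X PUT -H "Content-Type: application/json" -d '{"level":"INFO"}' http://<worker>:8083/admin/loggers/io.debezium.connector.base.ChangeEventQueue (availability of this endpoint depends on the Connect version and worker configuration).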
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,248 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,249 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,249 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:57,249 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:57,249 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,249 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2057, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=683) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392046 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2060, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=683, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,330 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392046 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,331 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=691, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,331 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,331 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,331 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,331 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=685) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,331 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,331 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,331 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147391022 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,331 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=692, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=685, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,331 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,331 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147391022 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with 
header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2058, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=684) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392024 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2061, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=684, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147392024 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,374 DEBUG || SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=25844/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} idle timeout check, elapsed: 25844 ms, remaining: 4156 ms [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:56:57,380 DEBUG || 
SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=25845/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} idle timeout check, elapsed: 25845 ms, remaining: 4155 ms [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:56:57,381 DEBUG || SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=25846/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} idle timeout check, elapsed: 25846 ms, remaining: 4154 ms [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2049, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=680) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477864 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,392 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2052, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=680, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,392 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477864 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2050, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=680) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,395 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477862 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,395 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2053, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=680, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,395 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477861 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,575 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:57,576 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:57,576 TRACE Oracle|vk_nau27|streaming Current time 1692961017576 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:57,576 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:57,578 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 2800. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:56:57,578 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599077 as end SCN. 
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:57,578 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:57,579 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599078' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:57,584 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:57,586 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599056, endScn=289931599077, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:57,586 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599057', endScn => '289931599077', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:57,596 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599056, 289931599077] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2051, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 DEBUG || 
[Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=681) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477637 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,619 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2054, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=681, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,619 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,620 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477637 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,678 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:57,679 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599065, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:54Z, transactionId='14000100ddb02b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bbb1.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:57,679 TRACE Oracle|vk_nau27|streaming Transaction 14000100ddb02b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:57,679 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599068, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:54Z, transactionId='0100180097b32500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bbb7.01c0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:57,679 TRACE Oracle|vk_nau27|streaming Transaction 0100180097b32500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:57,679 DEBUG Oracle|vk_nau27|streaming Counters{rows=5, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=2, rollbackCount=0, tableMetadataCount=0}. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:57,679 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 3679. Offset SCN: 289931599056, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599068, txIds=[0100180097b32500]}}], Active Transactions: 0, Sleep: 2800 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:56:57,679 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:57,681 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:57,684 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:57,686 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:56:57,688 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:56:57,712 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:57,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:57,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=129, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=129, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:56:57,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=178, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for 
session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=137) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 951 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=179, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=137, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 951 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,744 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2059, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 
2023-08-25 13:56:57,744 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,744 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=682) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,744 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391632 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,744 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2062, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=682, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,744 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,744 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391632 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,749 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,750 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:57,750 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:57,750 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,750 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
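The alternating pair of DEBUG messages above is the connector task's idle loop: the source task asks the change event queue for a batch, finds either no records or not enough to fill one, sleeps briefly, and checks again, which is why the same two lines recur within the same millisecond. The snippet below is a minimal sketch of that polling pattern, not Debezium's actual ChangeEventQueue implementation; the queue, maxBatchSize, and pollInterval names are illustrative assumptions.

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;

// Simplified illustration of an idle poll loop that would emit the
// "checking for more records" / "no records available ... sleeping a bit"
// pair seen in the log above. Names and parameters are hypothetical.
public class IdlePollLoopSketch {

    private final ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
    private final int maxBatchSize = 64;                          // hypothetical batch size
    private final Duration pollInterval = Duration.ofMillis(100); // hypothetical sleep between checks

    /** Blocks until at least one record is available, returning at most one batch. */
    public List<String> poll() throws InterruptedException {
        List<String> batch = new ArrayList<>();
        while (batch.isEmpty()) {
            System.out.println("checking for more records...");
            String record;
            // Drain whatever is currently queued, up to the batch size.
            while (batch.size() < maxBatchSize && (record = queue.poll()) != null) {
                batch.add(record);
            }
            if (batch.isEmpty()) {
                System.out.println("no records available or batch size not reached yet, sleeping a bit...");
                Thread.sleep(pollInterval.toMillis());
            }
        }
        return batch;
    }

    public void enqueue(String record) {
        queue.add(record);
    }

    public static void main(String[] args) throws InterruptedException {
        IdlePollLoopSketch loop = new IdlePollLoopSketch();
        // Produce a record after a short delay so the idle loop is visible first.
        new Thread(() -> {
            try {
                Thread.sleep(350);
                loop.enqueue("change-event-1");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }).start();
        System.out.println("polled batch: " + loop.poll());
    }
}

A real implementation would also bound the wait with a timeout so poll() can return an empty batch, which is presumably what produces the "About to send 0 records to Kafka" TRACE line above.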
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:57,830 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2060, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,830 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,830 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=684) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,830 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391546 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,830 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2063, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=684, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,830 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,830 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391546 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,831 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=692, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,831 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,831 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,831 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,831 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=686) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,831 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,831 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,831 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147390522 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,831 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=693, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=686, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,831 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,831 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147390522 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2061, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=685) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391524 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2064, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=685, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391524 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2052, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=681) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,893 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477363 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,893 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2055, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=681, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,894 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477362 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2053, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=681) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477361 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:57,896 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2056, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=681, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:57,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477360 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2054, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
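The FETCH requests in this capture are built with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0, which correspond to the consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level=read_uncommitted. A minimal sketch of a plain Java consumer configured with the same values, for reference only (the broker list and group id are copied from the log lines above; this is not the worker's own code):

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchTuningSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list and group id as they appear in this log.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        // These four settings map onto the logged FetchRequestData fields:
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);              // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                  // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);           // maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        // A real client would subscribe/assign and poll here; only the config mapping matters for this note.
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        }
    }
}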
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=682) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477136 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,120 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2057, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=682, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,120 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147477136 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=179, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=138) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 451 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=180, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=138, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 451 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2062, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=683) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391131 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2065, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=683, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391131 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
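For orientation, the naument-offset-N and naument-status-N partitions fetched by the naument--offsets and naument--statuses consumers are the Connect worker group naument tailing its own internal storage topics (offset.storage.topic and status.storage.topic, here apparently named naument-offset and naument-status). To see what the worker is reading from the status topic, a throwaway consumer is enough; a sketch with an assumed inspector group id (only the topic and broker names are taken from this log):

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class DumpStatusTopic {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-status-inspector"); // assumed; any unused group id works
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status")); // topic name inferred from the naument-status-N partitions above
            for (ConsumerRecord<String, String> rec : consumer.poll(Duration.ofSeconds(5))) {
                System.out.printf("%s-%d@%d %s -> %s%n",
                        rec.topic(), rec.partition(), rec.offset(), rec.key(), rec.value());
            }
        }
    }
}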
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
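The block above is the Debezium Oracle source task idling: the change event queue is checked for buffered records, nothing is available (or the batch is not yet full), the reader waits briefly and checks again, and WorkerSourceTask therefore has 0 records to hand to Kafka before it polls the source once more. A minimal Java sketch of that producer/consumer queue pattern follows; the class name, the lock-based timed wait, and the parameters are illustrative assumptions (loosely corresponding to Debezium's max.batch.size / poll.interval.ms settings), not Debezium's actual ChangeEventQueue implementation.

// Minimal sketch (illustrative, not Debezium's ChangeEventQueue): a bounded queue with the
// "checking for more records / no records available, sleeping a bit / polling records" cycle
// seen in the DEBUG lines above.
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class SimpleChangeEventQueue<T> {
    private final Queue<T> queue = new ArrayDeque<>();
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition notEmpty = lock.newCondition();
    private final int maxBatchSize;      // illustrative stand-in for max.batch.size
    private final long pollIntervalMs;   // illustrative stand-in for poll.interval.ms

    public SimpleChangeEventQueue(int maxBatchSize, long pollIntervalMs) {
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    // Streaming-thread side: buffer a captured change event and wake up a waiting poller.
    public void enqueue(T event) {
        lock.lock();
        try {
            queue.add(event);
            notEmpty.signal();
        } finally {
            lock.unlock();
        }
    }

    // Task poll() side ("checking for more records..."): if nothing arrives within
    // pollIntervalMs, return an empty batch so the caller sends 0 records and polls again,
    // just as WorkerSourceTask does in the lines below.
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        lock.lock();
        try {
            while (queue.isEmpty()) {
                // corresponds to "no records available or batch size not reached yet, sleeping a bit..."
                if (!notEmpty.await(pollIntervalMs, TimeUnit.MILLISECONDS)) {
                    return batch;
                }
            }
            while (!queue.isEmpty() && batch.size() < maxBatchSize) {
                batch.add(queue.poll());
            }
            return batch;
        } finally {
            lock.unlock();
        }
    }
}

An empty batch after the timed wait is the normal idle case; it simply means the streaming side has produced nothing new, which is exactly what the repeated "no records available" messages indicate.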
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,250 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,251 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:58,251 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:58,251 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,251 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2063, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,330 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,330 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping 
fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=685) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391046 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2066, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=685, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,331 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391045 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,332 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, 
clientId=naument--configs, correlationId=693, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,332 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,332 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,332 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,332 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=687) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,332 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,332 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,332 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147390021 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,332 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=694, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=687, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,332 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,332 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147390021 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2064, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] 
Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=686) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391024 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2067, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=686, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147391024 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2055, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=682) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476862 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,394 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2058, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=682, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,394 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476862 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2056, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=682) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476860 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2059, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=682, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476860 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2057, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=683) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476635 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,621 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2060, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=683, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,621 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476635 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,687 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:56:58,687 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:58,687 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=181, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 
2023-08-25 13:56:58,688 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=181, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,688 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:56:58,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=180, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=139) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2951 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=182, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=139, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2951 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2065, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=684) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390631 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2068, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=684, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
2023-08-25 13:56:58,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390631 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,751 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,751 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,752 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,752 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:58,752 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka.
Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:56:58,752 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,752 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:58,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2066, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=686) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390545 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,831 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2069, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=686, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,831 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390545 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,833 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=694, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,833 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
2023-08-25 13:56:58,833 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:58,833 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,833 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,833 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=688) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:58,833 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,833 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,833 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147389520 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:58,833 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=695, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=688, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:58,833 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,833 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147389520 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:58,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2067, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:58,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:58,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null)
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=687) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390523 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2070, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=687, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390523 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:56:58,888 TRACE || [Producer clientId=vk_nau27-schemahistory] The number of partitions is too small: available=1, all=1, not using adaptive for topic vk_nau27_src.schema-changes [org.apache.kafka.clients.producer.internals.BuiltInPartitioner]
2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2058, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
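The partitions named in these records imply the layout of the worker's three internal topics: naument-offset-0..24 (25 partitions), naument-status-0..4 (5 partitions) and naument-config-0 (1 partition). The BuiltInPartitioner TRACE above is the vk_nau27-schemahistory producer noting that its schema-changes topic has a single partition (available=1, all=1), so adaptive partitioning has nothing to choose between. A hedged AdminClient sketch to confirm those partition counts, assuming a placeholder bootstrap address (the topic names come from the log):

// Hedged sketch: describe the Connect worker's internal topics and print their
// partition counts. The bootstrap address is a placeholder.
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;

public class DescribeConnectTopics {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092"); // placeholder listener
        try (Admin admin = Admin.create(props)) {
            Map<String, TopicDescription> topics = admin
                    .describeTopics(List.of("naument-offset", "naument-config", "naument-status"))
                    .all()
                    .get();
            // Expected from the partition indices in the log: 25 / 1 / 5.
            topics.forEach((name, desc) ->
                    System.out.printf("%s: %d partitions%n", name, desc.partitions().size()));
        }
    }
}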
2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous
request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=683) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,895 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476361 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,895 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2061, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=683, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,896 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476360 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2059, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=683) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476359 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:58,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2062, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=683, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,898 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:58,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476359 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2060, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=684) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476134 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,122 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2063, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=684, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,122 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147476134 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,235 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=182, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=140) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2451 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=183, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=140, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2451 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2068, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=685) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390131 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2071, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=685, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:59,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:59,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:59,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:59,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390131 [org.apache.kafka.clients.consumer.KafkaConsumer]
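Every FETCH request above carries maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), and AbstractFetch skips any partition whose previous request to the same broker is still in flight. Below is a minimal sketch of a plain Java consumer configured with those same fetch settings; the bootstrap servers, group id and topic are placeholders rather than the worker's real configuration.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchSettingsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder addresses; the log shows broker1/2/3:29092 as the internal listeners.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
        // These mirror the FetchRequestData fields seen in the log:
        // maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0 (READ_UNCOMMITTED).
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("example-topic"));                     // placeholder topic
            while (true) {
                // Each poll() drives the incremental fetch sessions logged by
                // AbstractFetch and FetchSessionHandler above.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
            }
        }
    }
}
```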
2023-08-25 13:56:59,252 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:59,252 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:59,253 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:59,253 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:59,253 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:59,253 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
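The WorkerSourceTask and ChangeEventQueue lines above show the idle cycle of a Kafka Connect source task: the worker calls poll(), the connector has nothing buffered yet, and the worker simply polls again. A minimal sketch of that poll contract, assuming a hypothetical task backed by an in-memory queue (Debezium's real task drains io.debezium.connector.base.ChangeEventQueue instead):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

/** Hypothetical source task; illustrates the poll loop the worker logs above, not Debezium's implementation. */
public class ExamplePollingSourceTask extends SourceTask {

    private BlockingQueue<SourceRecord> queue;

    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
        // In a real connector a background thread would enqueue change events here.
        queue = new LinkedBlockingQueue<>();
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        // Wait briefly for a batch; handing back null (or an empty list) leaves the
        // worker with nothing to dispatch, so it logs the idle messages and polls again.
        SourceRecord first = queue.poll(500, TimeUnit.MILLISECONDS);
        if (first == null) {
            return null;
        }
        List<SourceRecord> batch = new ArrayList<>();
        batch.add(first);
        queue.drainTo(batch);   // hand over whatever has accumulated since
        return batch;
    }

    @Override
    public void stop() {
        // nothing to release in this sketch
    }
}
```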
2023-08-25 13:56:59,331 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2069, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:56:59,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:56:59,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:59,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:59,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:59,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:59,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:59,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:56:59,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=687) for node 1.
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390044 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,332 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2072, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=687, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,332 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390044 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,334 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=695, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,334 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,334 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,334 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,334 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=689) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,334 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,334 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,334 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147389019 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,334 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=696, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=689, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,334 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,334 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147389019 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2070, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=688) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390022 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2073, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=688, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147390022 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2061, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=684) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475860 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,396 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2064, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=684, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,396 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475860 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2062, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=684) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475859 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2065, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=684, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,397 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475859 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2063, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=685) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475633 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2066, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=685, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475633 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,735 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=183, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=141) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1951 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=184, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=141, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1951 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2071, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=686) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389631 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2074, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=686, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389631 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
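The repeated "checking for more records" / "no records available or batch size not reached yet, sleeping a bit" pairs, together with the WorkerSourceTask lines in the entries that follow ("About to send 0 records to Kafka", "Nothing to send to Kafka. Polling source for additional records"), are the idle loop of a source connector: the Connect worker keeps invoking the task's poll() while Debezium's internal change-event queue stays empty. A rough sketch of that contract is below, using a hypothetical task class rather than Debezium's real ChangeEventQueue-based implementation.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.connect.source.SourceRecord;
    import org.apache.kafka.connect.source.SourceTask;

    // Hypothetical task used only to illustrate the poll contract; Debezium's real task
    // drains its ChangeEventQueue here instead of this simple BlockingQueue.
    public class IdleLoopSourceTask extends SourceTask {
        private final BlockingQueue<SourceRecord> queue = new LinkedBlockingQueue<>();

        @Override
        public String version() {
            return "0.0.1";
        }

        @Override
        public void start(Map<String, String> props) {
            // A real task would start its change-capture thread here; that thread
            // would enqueue SourceRecords as database changes arrive.
        }

        @Override
        public List<SourceRecord> poll() throws InterruptedException {
            // "checking for more records...": wait briefly for something to show up.
            SourceRecord first = queue.poll(500, TimeUnit.MILLISECONDS);
            if (first == null) {
                // "no records available or batch size not reached yet, sleeping a bit...":
                // returning null tells the worker there is nothing to send, and it simply
                // polls the task again, producing the WorkerSourceTask TRACE lines below.
                return null;
            }
            List<SourceRecord> batch = new ArrayList<>();
            batch.add(first);
            queue.drainTo(batch);
            return batch;
        }

        @Override
        public void stop() {
            // A real task would signal its capture thread to shut down here.
        }
    }

In Debezium, how long this loop waits and how many records must accumulate before a batch is returned are governed by the connector's poll.interval.ms and max.batch.size settings, which is what the "batch size not reached yet" wording refers to.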
2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:59,753 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:59,754 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:59,754 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:59,754 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:56:59,754 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:56:59,754 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:56:59,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2072, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=688) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389544 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,832 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2075, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=688, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,832 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389544 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,834 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=696, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,834 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,834 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,834 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,834 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=690) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,834 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,834 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,834 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147388519 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,834 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=697, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=690, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,835 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,835 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147388519 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,854 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2073, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,854 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,854 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,854 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,854 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=689) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,854 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,854 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389522 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,854 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2076, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=689, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389521 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2064, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=685) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,897 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475359 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2067, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=685, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,897 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2065, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 
rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=685) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475359 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:56:59,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2068, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=685, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch 
for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:56:59,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475358 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2066, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 DEBUG || 
[Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=686) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475133 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,123 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2069, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=686, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,123 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147475133 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=184, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=142) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1451 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=185, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=142, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1451 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2074, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at 
position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=687) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389131 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2077, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=687, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:00,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:00,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:00,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:00,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389131 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:00,254 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:00,254 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[... the preceding two ChangeEventQueue DEBUG records repeat verbatim several dozen more times, all timestamped 13:57:00,254, while the Oracle connector task idles ...]
2023-08-25 13:57:00,255 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:00,255 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:00,255 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:00,255 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:00,255 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
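The ChangeEventQueue and WorkerSourceTask records above show an idle source task: Debezium's internal queue has nothing buffered, so the Connect worker's poll loop keeps getting zero records back. As a rough, hypothetical sketch of that contract (not Debezium's actual OracleConnectorTask or ChangeEventQueue; the class name and queue below are invented for illustration), a Kafka Connect SourceTask that returns null from poll() when its buffer is empty produces exactly the "About to send 0 records to Kafka" / "Nothing to send to Kafka" worker messages seen here:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

// Hypothetical minimal source task, for illustration only (not Debezium's implementation).
public class IdleSourceTaskSketch extends SourceTask {

    // Change events would be enqueued here by a capture thread started in start().
    private final BlockingQueue<SourceRecord> queue = new LinkedBlockingQueue<>(8192);

    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
        // A real connector would start its change-capture loop here; omitted in this sketch.
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        // Wait briefly for data instead of busy-looping ("sleeping a bit" in the log above).
        SourceRecord first = queue.poll(100, TimeUnit.MILLISECONDS);
        if (first == null) {
            // Returning null means "nothing this round"; the worker logs
            // "About to send 0 records to Kafka" and simply polls again.
            return null;
        }
        List<SourceRecord> batch = new ArrayList<>();
        batch.add(first);
        queue.drainTo(batch); // hand over whatever else is already buffered
        return batch;
    }

    @Override
    public void stop() {
        // Nothing to clean up in this sketch.
    }
}
```

In Debezium itself this buffering is handled by io.debezium.connector.base.ChangeEventQueue, and the "batch size not reached yet" wording reflects the batching behaviour that is typically tuned via the connector's max.batch.size and poll.interval.ms settings.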
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:00,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2075, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,333 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,333 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,334 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=689) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,334 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,334 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,334 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389043 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,334 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2078, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=689, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,334 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,334 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,334 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,334 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,334 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,334 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389042 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,336 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=697, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,336 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,336 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,336 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,336 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=691) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,336 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,336 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,336 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147388017 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,336 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=698, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=691, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,336 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,336 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147388017 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2076, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
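
The naument--statuses, naument--configs, and naument--offsets consumers are the Connect worker's internal clients for its status, config, and offset topics, and the FetchRequestData logged above shows their effective settings (read_uncommitted isolation, 500 ms max wait, 1 byte min, 50 MB max fetch, 30 s request timeout). If those internal topics need to be inspected directly, a standalone consumer with matching settings looks roughly like this; the bootstrap server list and the probe group id are assumptions, while the topic name naument-status comes from the partitions in the log:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class StatusTopicProbe {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker addresses as they appear in the fetch log lines; adjust to the real listeners.
        props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
        // Deliberately a separate group so this probe does not interact with the Connect group "naument".
        props.put("group.id", "naument-debug");
        props.put("client.id", "naument-status-probe");
        // Values matching the FetchRequestData logged above.
        props.put("isolation.level", "read_uncommitted"); // isolationLevel=0
        props.put("fetch.min.bytes", "1");                // minBytes=1
        props.put("fetch.max.wait.ms", "500");            // maxWaitMs=500
        props.put("fetch.max.bytes", "52428800");         // maxBytes=52428800
        props.put("request.timeout.ms", "30000");         // timeout 30000
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status"));
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            System.out.println("fetched " + records.count() + " records");
        }
    }
}
```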
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=690) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389021 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2079, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=690, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147389021 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,397 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2067, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=686) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474858 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2070, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=686, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
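
The many "Skipping fetch for partition ... because previous request to broker N has not been processed" TRACE lines are normal scheduling noise: the consumer keeps at most one in-flight fetch per broker, so on each pass it skips every partition whose leader already has a pending request. A rough sketch of that bookkeeping, not Kafka's actual AbstractFetch/FetchSessionHandler code:

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class PendingFetchSketch {
    private final Set<Integer> nodesWithPendingFetch = new HashSet<>();

    // partitionLeaders maps a partition name to the id of the broker that currently leads it.
    public List<String> selectPartitionsToFetch(Map<String, Integer> partitionLeaders) {
        List<String> ready = new ArrayList<>();
        for (Map.Entry<String, Integer> e : partitionLeaders.entrySet()) {
            if (nodesWithPendingFetch.contains(e.getValue())) {
                // "Skipping fetch for partition X because previous request to broker N has not been processed"
                continue;
            }
            ready.add(e.getKey());
        }
        return ready;
    }

    public void onFetchSent(int nodeId) {      // "Adding pending request for node ..."
        nodesWithPendingFetch.add(nodeId);
    }

    public void onFetchResponse(int nodeId) {  // "Removing pending request for node ..."
        nodesWithPendingFetch.remove(nodeId);
    }
}
```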
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474858 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2068, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=686) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,398 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474858 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,398 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2071, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=686, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474857 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,488 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:00,490 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:00,490 TRACE Oracle|vk_nau27|streaming Current time 1692961020490 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:00,490 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:00,491 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 3000. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:00,491 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599165 as end SCN. 
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:00,491 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:00,493 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599166' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:00,498 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:00,500 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599068, endScn=289931599165, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:00,500 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599069', endScn => '289931599165', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:00,508 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599068, 289931599165] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599079, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:57Z, transactionId='0f002000e1e52900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bbbd.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Transaction 0f002000e1e52900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599082, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:56:57Z, transactionId='1300140052b22b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bbc3.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Transaction 1300140052b22b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. 
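
For troubleshooting, the mining window the connector just logged can be replayed by hand. The SCN lookup and the dbms_logmnr.start_logmnr call below are copied from the TRACE lines above (same SCN range and options); the JDBC URL and credentials are placeholders for the DEBEZIUM account, the query against V$LOGMNR_CONTENTS is only the usual way to inspect such a window rather than a statement shown in this excerpt, and the Oracle JDBC driver must be on the classpath:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class LogMinerWindowReplay {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details; use the same account the connector uses.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:oracle:thin:@//oracle-host:1521/NAUMENT1", "DEBEZIUM", "secret");
             Statement st = conn.createStatement()) {

            // End of the mining window: the database's current SCN.
            try (ResultSet rs = st.executeQuery("SELECT CURRENT_SCN FROM V$DATABASE")) {
                rs.next();
                System.out.println("current SCN = " + rs.getLong(1));
            }

            // Start a mining session over the window logged above.
            st.execute("BEGIN sys.dbms_logmnr.start_logmnr("
                    + "startScn => '289931599069', endScn => '289931599165', "
                    + "OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS"
                    + " + DBMS_LOGMNR.DDL_DICT_TRACKING"
                    + " + DBMS_LOGMNR.NO_ROWID_IN_STMT); END;");

            // Inspect what the window contains.
            try (ResultSet rs = st.executeQuery(
                    "SELECT SCN, OPERATION, SEG_OWNER, TABLE_NAME FROM V$LOGMNR_CONTENTS")) {
                while (rs.next()) {
                    System.out.printf("%d %s %s.%s%n",
                            rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4));
                }
            }

            st.execute("BEGIN sys.dbms_logmnr.end_logmnr; END;");
        }
    }
}
```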
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599089, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='1100180039292b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bbdc.010c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Transaction 1100180039292b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931599091) LogMinerEventRow{scn=289931599092, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='0e000f00e34c2900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bbed.019c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Transaction 0e000f00e34c2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599095, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='080017003d582e00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bbef.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,616 TRACE Oracle|vk_nau27|streaming Transaction 080017003d582e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599098, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='12000600f5492a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc05.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 12000600f5492a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931599100) LogMinerEventRow{scn=289931599101, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='0a0021000b1d3100', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc16.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 0a0021000b1d3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599104, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='0b001200440a2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc17.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 0b001200440a2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599107, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='04001f006ed02a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc2d.005c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 04001f006ed02a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931599109) LogMinerEventRow{scn=289931599110, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='02001900fbcd2700', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc38.0158, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 02001900fbcd2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599114, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='070011008cc82d00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc3e.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 070011008cc82d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599117, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='1000200007302a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc47.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 1000200007302a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599121, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='0d0018001bd52900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc4f.01ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 0d0018001bd52900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599124, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='090007008fef2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc59.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 090007008fef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599127, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='0c000f00ec562b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc61.0180, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 0c000f00ec562b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_EVENT' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599130, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='03000200b0b12800', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc6b.002c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 03000200b0b12800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599133, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='05000600a86f2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc7c.003c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 05000600a86f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SEQUENCE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931599135) LogMinerEventRow{scn=289931599136, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='110013001b2c2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc8c.001c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 110013001b2c2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599138, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='14000c00b7b02b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc8c.00d4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 14000c00b7b02b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SEQUENCE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931599140) LogMinerEventRow{scn=289931599141, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='02001e0029ce2700', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc94.008c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 02001e0029ce2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599144, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='1300080051be2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc95.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 1300080051be2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599147, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='0e001700e63e2900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bc9c.00ac, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 0e001700e63e2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599150, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='04000600d5cf2a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bcab.002c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Transaction 04000600d5cf2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,617 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599153, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='06001100b9052a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bcba.0174, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Transaction 06001100b9052a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599156, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='0f0007003dee2900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bcc9.01b8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Transaction 0f0007003dee2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599159, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='080007002d5a2e00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bcd9.01a4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Transaction 080007002d5a2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599162, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='0a000d00be1e3100', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bcea.0024, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming Transaction 0a000d00be1e3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 DEBUG Oracle|vk_nau27|streaming Counters{rows=109, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=27, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 DEBUG Oracle|vk_nau27|streaming Processed in 1 ms. Lag: 618. 
Offset SCN: 289931599068, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599162, txIds=[0a000d00be1e3100]}}], Active Transactions: 0, Sleep: 3000 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:00,618 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:00,620 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:00,621 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2069, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=687) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474633 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,623 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:00,623 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2072, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=687, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,623 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474633 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,625 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:00,712 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:00,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:00,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=130, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=130, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,712 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response 
[org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:00,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=185, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=143) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 951 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=186, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=143, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 
rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 951 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2077, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=688) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388631 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,745 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2080, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=688, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,745 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388631 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,755 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:00,755 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:00,755 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:00,755 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:00,756 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:00,756 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:00,756 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:00,756 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:00,756 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
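The vk_nau27 source-task lines above show Debezium's ChangeEventQueue idling: the task's poll() finds no buffered change events, so the queue sleeps briefly and checks again, and the Connect worker accordingly logs "About to send 0 records to Kafka" / "Nothing to send to Kafka". Below is a minimal, illustrative sketch of that bounded-buffer poll-and-sleep pattern in plain Java. It is not Debezium's actual ChangeEventQueue implementation; the class name and parameters are assumptions made only to show the pattern behind these DEBUG messages.

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Illustrative stand-in for the loop behind "checking for more records..." /
// "no records available or batch size not reached yet, sleeping a bit...".
public class SimpleChangeEventQueue<T> {
    private final BlockingQueue<T> queue;
    private final int maxBatchSize;
    private final Duration pollInterval;

    public SimpleChangeEventQueue(int maxQueueSize, int maxBatchSize, Duration pollInterval) {
        this.queue = new ArrayBlockingQueue<>(maxQueueSize);
        this.maxBatchSize = maxBatchSize;
        this.pollInterval = pollInterval;
    }

    // Called by the connector's streaming thread when a change event is captured.
    public void enqueue(T record) throws InterruptedException {
        queue.put(record); // blocks when the queue is full (back-pressure)
    }

    // Called by the source task's poll(); returns a batch, possibly empty.
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        // "checking for more records..."
        queue.drainTo(batch, maxBatchSize);
        if (batch.isEmpty()) {
            // "no records available or batch size not reached yet, sleeping a bit..."
            T first = queue.poll(pollInterval.toMillis(), TimeUnit.MILLISECONDS);
            if (first != null) {
                batch.add(first);
                queue.drainTo(batch, maxBatchSize - 1);
            }
        }
        // An empty batch is what the worker reports as "About to send 0 records to Kafka".
        return batch;
    }
}

In the real pipeline the connector's streaming thread fills the queue and the worker's source task (WorkerSourceTask) drains it in a loop, which is exactly the alternation these entries record.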
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:00,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2078, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=690) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388542 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,834 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2081, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=690, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,834 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388542 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,836 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=698, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,836 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,836 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,836 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,836 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=692) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,836 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,836 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,836 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147387517 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,836 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=699, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=692, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,836 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,837 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147387517 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2079, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=691) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388521 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2082, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=691, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388520 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2070, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=687) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474358 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,898 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2073, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=687, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,898 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,899 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474358 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,899 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2071, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=687) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474356 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:00,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2074, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=687, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:00,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474356 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2072, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=688) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474132 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,124 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2075, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=688, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,124 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,125 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147474132 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=186, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=144) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 451 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,236 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=187, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=144, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,236 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 451 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2080, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=689) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388131 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,245 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2083, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=689, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:01,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:01,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:01,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:01,245 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388131 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,256 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,257 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,257 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:01,257 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:01,257 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,257 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2081, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping 
fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=691) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388041 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,335 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2084, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=691, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,335 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388041 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,338 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, 
clientId=naument--configs, correlationId=699, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,338 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,338 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,338 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,338 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=693) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,338 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,338 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,338 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147387015 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,338 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=700, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=693, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,338 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,338 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147387015 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2082, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] 
Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=692) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388020 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2085, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=692, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147388020 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2073, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=688) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473857 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,399 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2076, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=688, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,399 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473857 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2074, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=688) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473856 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2077, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=688, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473856 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,530 DEBUG || SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=30000/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} idle timeout check, elapsed: 30000 ms, remaining: 0 ms [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:57:01,530 DEBUG || SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=30000/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} idle timeout expired [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:57:01,530 DEBUG || onFail FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=FI,flush=-,to=30000/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] java.util.concurrent.TimeoutException: Idle timeout expired: 30000/30000 ms at org.eclipse.jetty.io.IdleTimeout.checkIdleTimeout(IdleTimeout.java:171) at org.eclipse.jetty.io.IdleTimeout.idleCheck(IdleTimeout.java:113) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base/java.lang.Thread.run(Thread.java:829) 2023-08-25 13:57:01,530 DEBUG || close HttpParser{s=START,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,530 DEBUG || START --> CLOSE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,530 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=30001/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} onFillInterestedFailed {} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:01,530 DEBUG || shutdownOutput SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OPEN,fill=-,flush=-,to=30001/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,530 DEBUG || fillInterested HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=-,flush=-,to=30001/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:01,531 DEBUG || interested FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:01,531 DEBUG || changeInterests p=false 1->1 for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,531 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || ignored: 
WriteFlusher@10ce0452{IDLE}->null [org.eclipse.jetty.io.WriteFlusher] java.util.concurrent.TimeoutException: Idle timeout expired: 30000/30000 ms at org.eclipse.jetty.io.IdleTimeout.checkIdleTimeout(IdleTimeout.java:171) at org.eclipse.jetty.io.IdleTimeout.idleCheck(IdleTimeout.java:113) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base/java.lang.Thread.run(Thread.java:829) 2023-08-25 13:57:01,531 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || Ignored idle endpoint SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,531 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@54647915 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || Key interests updated 1 -> 1 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,531 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected oshut local=/10.0.2.144:8083 remote=/10.0.2.141:50638], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,531 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,531 DEBUG || task CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,531 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=27]@2023-08-25T13:57:01.531647+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,531 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=27]@2023-08-25T13:57:01.531832+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:01,531 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=27]@2023-08-25T13:57:01.531909+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,531 DEBUG || fillable FillInterest@3cf54187{AC.ReadCB@344b865a{HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:01,532 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:57:01.53201+03:00 tryProduce true 
[org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:01,532 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,532 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,532 DEBUG || shutdownInput SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,OSHUT,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,532 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || doClose SocketChannelEndPoint@7bb17d67{l=/10.0.2.144:8083,r=/10.0.2.141:50638,CLOSED,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,532 DEBUG || onClose FillInterest@3cf54187{null} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:01,532 DEBUG || Wakeup ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || queue org.eclipse.jetty.io.ManagedSelector$DestroyEndPoint@118e3f07 startThread=0 [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:01,532 DEBUG || filled -1 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,532 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/0 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || run org.eclipse.jetty.io.ManagedSelector$DestroyEndPoint@118e3f07 in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 
13:57:01,532 DEBUG || atEOF HttpParser{s=CLOSE,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,532 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 0 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} filled -1 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,532 DEBUG || Destroyed SocketChannelEndPoint@7bb17d67{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,532 DEBUG || close SocketChannelEndPoint@7bb17d67{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,532 DEBUG || onClose HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:01,532 DEBUG || ran org.eclipse.jetty.io.ManagedSelector$DestroyEndPoint@118e3f07 in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:01,532 DEBUG || close(null) SocketChannelEndPoint@7bb17d67{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,532 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@344b865a[p=HttpParser{s=CLOSE,0 of 
-1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,532 DEBUG || parseNext s=CLOSE HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,532 DEBUG || CLOSE --> CLOSED [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,532 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@344b865a[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=CLOSED,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,533 DEBUG || releaseRequestBuffer HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@344b865a[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,533 DEBUG || shutdownOutput SocketChannelEndPoint@7bb17d67{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@344b865a[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,533 DEBUG || HttpConnection@344b865a::SocketChannelEndPoint@7bb17d67{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@344b865a[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@5b51688{s=START}]=>HttpChannelOverHttp@75768c61{s=HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@71491617{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,533 DEBUG || ReservedThread@2da5db67{RUNNING,thread=Thread[qtp974320615-176,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,533 DEBUG || ReservedThread@2da5db67{RESERVED,thread=Thread[qtp974320615-176,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,535 DEBUG || SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=30000/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} idle timeout check, elapsed: 
30000 ms, remaining: 0 ms [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:57:01,535 DEBUG || SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=30000/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} idle timeout expired [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:57:01,535 DEBUG || onFail FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=FI,flush=-,to=30000/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=START,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] java.util.concurrent.TimeoutException: Idle timeout expired: 30000/30000 ms at org.eclipse.jetty.io.IdleTimeout.checkIdleTimeout(IdleTimeout.java:171) at org.eclipse.jetty.io.IdleTimeout.idleCheck(IdleTimeout.java:113) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base/java.lang.Thread.run(Thread.java:829) 2023-08-25 13:57:01,535 DEBUG || close HttpParser{s=START,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,535 DEBUG || START --> CLOSE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,535 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=30000/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} onFillInterestedFailed {} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:01,535 DEBUG || shutdownOutput SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OPEN,fill=-,flush=-,to=30000/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,535 DEBUG || fillInterested HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=-,flush=-,to=30001/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:01,535 DEBUG || interested 
FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:01,535 DEBUG || changeInterests p=false 1->1 for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,536 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 on ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,536 DEBUG || Wakeup on submit ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,536 DEBUG || ignored: WriteFlusher@3cebe5aa{IDLE}->null [org.eclipse.jetty.io.WriteFlusher] java.util.concurrent.TimeoutException: Idle timeout expired: 30000/30000 ms at org.eclipse.jetty.io.IdleTimeout.checkIdleTimeout(IdleTimeout.java:171) at org.eclipse.jetty.io.IdleTimeout.idleCheck(IdleTimeout.java:113) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base/java.lang.Thread.run(Thread.java:829) 2023-08-25 13:57:01,536 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,536 DEBUG || Ignored idle endpoint SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,536 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 1 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,536 DEBUG || SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=30000/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} idle timeout check, elapsed: 30000 ms, remaining: 0 ms [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:57:01,536 DEBUG || selected 1 
channel=java.nio.channels.SocketChannel[connected oshut local=/10.0.2.144:8083 remote=/10.0.2.141:50662], selector=sun.nio.ch.EPollSelectorImpl@1e4e3519, interestOps=1, readyOps=1 SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,536 DEBUG || SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=30001/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} idle timeout expired [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:57:01,536 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,536 DEBUG || onFail FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=FI,flush=-,to=30001/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=START,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] java.util.concurrent.TimeoutException: Idle timeout expired: 30000/30000 ms at org.eclipse.jetty.io.IdleTimeout.checkIdleTimeout(IdleTimeout.java:171) at org.eclipse.jetty.io.IdleTimeout.idleCheck(IdleTimeout.java:113) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base/java.lang.Thread.run(Thread.java:829) 2023-08-25 13:57:01,536 DEBUG || close HttpParser{s=START,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,536 DEBUG || START --> CLOSE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,536 DEBUG || task CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,536 DEBUG || 
HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=30001/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} onFillInterestedFailed {} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:01,536 DEBUG || shutdownOutput SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OPEN,fill=-,flush=-,to=30001/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,536 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@37dec463/SelectorProducer@3bf26810/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=1,epc=44]@2023-08-25T13:57:01.536506+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,536 DEBUG || fillInterested HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=-,flush=-,to=30001/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:01,536 DEBUG || interested FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:01,536 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,536 DEBUG || changeInterests p=false 1->1 for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,536 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=44]@2023-08-25T13:57:01.536638+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of 
-1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:01,536 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,536 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=1 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,536 DEBUG || ReservedThread@2da5db67{RESERVED,thread=Thread[qtp974320615-176,5,main]} task=EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=44]@2023-08-25T13:57:01.536723+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,536 DEBUG || ignored: WriteFlusher@22fa032{IDLE}->null [org.eclipse.jetty.io.WriteFlusher] java.util.concurrent.TimeoutException: Idle timeout expired: 30000/30000 ms at org.eclipse.jetty.io.IdleTimeout.checkIdleTimeout(IdleTimeout.java:171) at org.eclipse.jetty.io.IdleTimeout.idleCheck(IdleTimeout.java:113) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base/java.lang.Thread.run(Thread.java:829) 2023-08-25 13:57:01,536 DEBUG || fillable FillInterest@23c6502d{AC.ReadCB@79187a6c{HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:01,536 DEBUG || EatWhatYouKill@37dec463/SelectorProducer@3bf26810/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=1,epc=45]@2023-08-25T13:57:01.536848+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:01,536 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,536 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@19378523 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,536 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected oshut local=/10.0.2.144:8083 remote=/10.0.2.141:50652], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of 
-1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,536 DEBUG || Ignored idle endpoint SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,537 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,537 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,537 DEBUG || shutdownInput SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,OSHUT,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,537 DEBUG || task CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,537 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || doClose SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of 
-1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,537 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=30]@2023-08-25T13:57:01.537193+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,537 DEBUG || onClose FillInterest@23c6502d{null} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:01,537 DEBUG || Key interests updated 0 -> 0 on SocketChannelEndPoint@36e288fb{l=/10.0.2.144:8083,r=/10.0.2.141:50662,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,537 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || Wakeup ManagedSelector@19213a74{STARTED} id=1 keys=1 selected=1 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=30]@2023-08-25T13:57:01.537256+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:01,537 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 woken up from select, 0/0/0 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 processing 0 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || fillable FillInterest@3173070e{AC.ReadCB@502a41c0{HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:01,537 DEBUG || 
ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=30]@2023-08-25T13:57:01.537291+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,537 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,537 DEBUG || queue org.eclipse.jetty.io.ManagedSelector$DestroyEndPoint@1d509bd2 startThread=0 [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:01,537 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=31]@2023-08-25T13:57:01.537428+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:01,537 DEBUG || filled -1 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,537 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@33d564a1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1e4e3519 waiting with 0 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || run org.eclipse.jetty.io.ManagedSelector$DestroyEndPoint@1d509bd2 in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:01,537 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,537 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || atEOF HttpParser{s=CLOSE,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,537 DEBUG || Key interests updated 0 -> 0 on SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 
13:57:01,537 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || Destroyed SocketChannelEndPoint@36e288fb{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} filled -1 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,537 DEBUG || onClose HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:01,537 DEBUG || ran org.eclipse.jetty.io.ManagedSelector$DestroyEndPoint@1d509bd2 in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:01,537 DEBUG || shutdownInput SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,OSHUT,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,537 DEBUG || close SocketChannelEndPoint@36e288fb{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,537 DEBUG || doClose SocketChannelEndPoint@1cb2ea6f{l=/10.0.2.144:8083,r=/10.0.2.141:50652,CLOSED,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,537 DEBUG || onClose FillInterest@3173070e{null} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:01,537 DEBUG || Wakeup ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,537 DEBUG || close(null) SocketChannelEndPoint@36e288fb{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,537 DEBUG || queue org.eclipse.jetty.io.ManagedSelector$DestroyEndPoint@362dc08f startThread=0 [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:01,538 DEBUG || filled -1 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:01,538 DEBUG || atEOF HttpParser{s=CLOSE,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,538 DEBUG || run org.eclipse.jetty.io.ManagedSelector$DestroyEndPoint@362dc08f in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:01,538 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,538 DEBUG || parseNext s=CLOSE HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,538 DEBUG || CLOSE --> CLOSED [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,538 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=0/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} filled -1 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,538 DEBUG || Destroyed SocketChannelEndPoint@1cb2ea6f{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=0/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,538 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} parsed false 
HttpParser{s=CLOSED,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,538 DEBUG || close SocketChannelEndPoint@1cb2ea6f{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,538 DEBUG || onClose HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:01,538 DEBUG || ran org.eclipse.jetty.io.ManagedSelector$DestroyEndPoint@362dc08f in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:01,538 DEBUG || releaseRequestBuffer HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=2/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,538 DEBUG || close(null) SocketChannelEndPoint@1cb2ea6f{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,538 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSE,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,538 DEBUG || parseNext s=CLOSE HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,538 DEBUG || CLOSE --> CLOSED [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:01,538 DEBUG || shutdownOutput SocketChannelEndPoint@36e288fb{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=2/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 
2023-08-25 13:57:01,538 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=CLOSED,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,538 DEBUG || HttpConnection@79187a6c::SocketChannelEndPoint@36e288fb{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=2/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@79187a6c[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@48abe408{s=START}]=>HttpChannelOverHttp@38257057{s=HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=17,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@3c1ae38b{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,538 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,538 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,538 DEBUG || releaseRequestBuffer HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,538 DEBUG || shutdownOutput SocketChannelEndPoint@1cb2ea6f{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:01,538 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,538 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/0 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,538 DEBUG || HttpConnection@502a41c0::SocketChannelEndPoint@1cb2ea6f{l=0.0.0.0/0.0.0.0:8083,r=null,CLOSED,fill=-,flush=-,to=1/30000}{io=0/0,kio=-1,kro=-1}->HttpConnection@502a41c0[p=HttpParser{s=CLOSED,0 of -1},g=HttpGenerator@1feb4af1{s=START}]=>HttpChannelOverHttp@311ebe27{s=HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=18,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@27f128fa{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:01,538 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 
13:57:01,538 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,538 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,538 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 0 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:01,538 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,538 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2075, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 
1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=689) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,625 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473631 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,625 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2078, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=689, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,626 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473630 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,688 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:01,688 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:01,688 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=188, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,688 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, 
apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=188, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,688 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:01,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,688 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=187, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=145) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2952 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,736 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=189, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=145, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,736 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2952 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,746 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2083, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,746 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,746 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=690) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,746 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387630 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,746 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2086, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=690, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,746 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
2023-08-25 13:57:01,746 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,746 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387630 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:01,757 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:01,758 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:01,758 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:01,758 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:01,758 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
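The checking/sleeping lines above are the Debezium source task draining a bounded in-memory change-event queue in batches and backing off while nothing has arrived; when a batch comes back empty, the worker logs "About to send 0 records to Kafka" and immediately polls the source again. A minimal sketch of that polling pattern using plain java.util.concurrent primitives (class, field, and parameter names here are illustrative, not Debezium's actual ChangeEventQueue API):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    // Illustrative bounded batching queue: a streaming thread enqueues change
    // events, and the source task's poll() drains them in batches, sleeping
    // briefly whenever no records are available yet.
    public class BatchingEventQueue<T> {
        private final BlockingQueue<T> buffer;
        private final int maxBatchSize;
        private final long pollIntervalMs;
        private final long maxWaitMs;

        public BatchingEventQueue(int capacity, int maxBatchSize, long pollIntervalMs, long maxWaitMs) {
            this.buffer = new ArrayBlockingQueue<>(capacity);
            this.maxBatchSize = maxBatchSize;
            this.pollIntervalMs = pollIntervalMs;
            this.maxWaitMs = maxWaitMs;
        }

        // Producer side: blocks when the buffer is full, giving back-pressure.
        public void enqueue(T event) throws InterruptedException {
            buffer.put(event);
        }

        // Consumer side: returns up to maxBatchSize events, or whatever has
        // arrived once maxWaitMs has elapsed (possibly an empty batch).
        public List<T> poll() throws InterruptedException {
            List<T> batch = new ArrayList<>(maxBatchSize);
            long deadline = System.currentTimeMillis() + maxWaitMs;
            while (batch.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
                T event = buffer.poll();              // checking for more records...
                if (event == null) {
                    Thread.sleep(pollIntervalMs);     // no records available or batch size not reached yet, sleeping a bit...
                } else {
                    batch.add(event);
                }
            }
            return batch;
        }
    }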
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:01,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2084, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,835 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,835 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,835 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,835 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,836 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=692) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,836 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,836 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387541 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,836 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2087, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=692, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,836 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387540 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,838 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=700, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,838 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,838 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,838 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,838 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=694) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,838 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,838 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,838 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147386515 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,838 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=701, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=694, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,838 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,838 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147386515 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,856 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2085, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,856 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,856 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,856 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,856 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=693) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,856 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,856 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,856 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387520 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,856 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2088, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=693, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387520 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:57:01,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2076, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer 
clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous 
request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=689) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473356 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2079, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=689, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473356 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2077, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,900 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=689) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:01,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473356 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:01,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2080, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=689, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:01,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473355 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2078, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=690) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473130 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,126 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2081, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=690, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,126 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,127 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147473130 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,237 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=189, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,237 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,237 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,237 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,237 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=146) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,237 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,237 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,237 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2451 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,237 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=190, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=146, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,237 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,237 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2451 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,247 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2086, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,247 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,247 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=691) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,247 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387129 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,247 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2089, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=691, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,247 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,247 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387129 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,258 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,258 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,259 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,259 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:02,259 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:02,259 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,259 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2087, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=693) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387040 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,336 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2090, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=693, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,336 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,337 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,337 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,337 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,337 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,337 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387040 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=701, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=695) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,339 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147386014 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=702, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=695, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,339 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,339 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147386014 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,357 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2088, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,357 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,357 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
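The FETCH requests sent by the worker's internal consumers (naument--statuses, naument--configs, naument--offsets) all carry FetchRequestData(maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0). These fields correspond to the consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level=read_uncommitted. The sketch below configures a standalone consumer with the same fetch settings to peek at the naument-status topic seen in the log; the broker list and topic name are taken from the log, while the group id, deserializers and offset reset are assumptions.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Standalone consumer mirroring the fetch parameters visible in the FETCH requests above.
public class StatusTopicPeek {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-log-inspection");          // hypothetical group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);                      // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                          // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);                   // maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");         // isolationLevel=0
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status"));
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<byte[], byte[]> record : records) {
                System.out.printf("partition=%d offset=%d%n", record.partition(), record.offset());
            }
        }
    }
}

With minBytes=1 and maxWaitMs=500, a broker answers each fetch after at most 500 ms even when it has nothing to return, which is why the log shows a steady stream of empty FetchResponseData(responses=[]) roughly every half second.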
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=694) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,357 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387019 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,357 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2091, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=694, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147387019 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2079, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,400 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,400 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=690) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,401 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472856 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,401 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2082, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=690, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
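The FetchSessionHandler and AbstractFetch lines around here show two mechanisms at work: incremental fetch sessions (each "Built incremental fetch (sessionId=..., epoch=...)" sends only the delta "Added (), altered (), removed (), replaced ()" against the implied partition set) and one in-flight request per broker (every "Skipping fetch for partition ... because previous request to ... has not been processed" means that partition's leader already has an unanswered fetch). Below is a simplified model of the second mechanism only, with hypothetical names; it is not the org.apache.kafka.clients code, just the bookkeeping the TRACE lines describe.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Simplified "one in-flight fetch per node" scheduler implied by the TRACE lines above.
public class FetchSchedulerSketch {
    private final Map<Integer, Integer> partitionLeaders = new HashMap<>();   // partition -> leader node id
    private final Set<Integer> nodesWithPendingRequest = new HashSet<>();

    public void assign(int partition, int leaderNode) {
        partitionLeaders.put(partition, leaderNode);
    }

    // Group fetchable partitions by leader, skipping nodes that are still busy.
    public Map<Integer, List<Integer>> prepareFetches() {
        Map<Integer, List<Integer>> byNode = new HashMap<>();
        partitionLeaders.forEach((partition, node) -> {
            if (nodesWithPendingRequest.contains(node)) {
                // "Skipping fetch for partition X because previous request to node N has not been processed"
                return;
            }
            byNode.computeIfAbsent(node, n -> new ArrayList<>()).add(partition);
        });
        byNode.keySet().forEach(nodesWithPendingRequest::add);   // "Adding pending request for node ..."
        return byNode;
    }

    public void onResponse(int node) {
        nodesWithPendingRequest.remove(node);                    // "Removing pending request for node ..."
    }
}

This is why, immediately after a request is built for node 1, every partition led by node 1 is reported as skipped until that node's response arrives.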
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,401 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472855 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2080, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=690) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472854 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2083, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=690, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472854 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2081, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
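The FetchPosition entries for the naument-offset partitions show where the naument--offsets consumer currently stands: most partitions sit at offset 0, while a handful are at small non-zero offsets (for example 2, 8, 12, 25, 44, 122). If those positions need to be verified directly, a throwaway consumer can list the end offsets of the topic. The topic name "naument-offset" and the broker addresses come from the log; everything else in this sketch is an assumption.

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Lists the end offset of every partition of the Connect offsets topic seen in the log.
public class OffsetTopicEndOffsets {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            List<TopicPartition> partitions = consumer.partitionsFor("naument-offset").stream()
                    .map(p -> new TopicPartition(p.topic(), p.partition()))
                    .collect(Collectors.toList());
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
            endOffsets.forEach((tp, end) -> System.out.printf("%s end offset = %d%n", tp, end));
        }
    }
}

No group.id is needed here because the consumer only queries metadata and end offsets; it never joins the naument group or commits anything, so it does not disturb the Connect worker whose fetches fill this log.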
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=691) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,627 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472629 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,627 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2084, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=691, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,628 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472628 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,738 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=190, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,738 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,738 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,738 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,738 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=147) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,738 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,738 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,738 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1950 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,738 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=191, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=147, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,738 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,738 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1950 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,748 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2089, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,748 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,748 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=692) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,748 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386628 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,748 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2092, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=692, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,748 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,748 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386628 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
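The repeated "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." pairs here are emitted by Debezium's ChangeEventQueue while the connector task waits for change events. A simplified, self-contained sketch of that consume loop follows; the class name, queue bound, batch size and sleep interval are illustrative stand-ins, not Debezium's actual implementation.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Simplified stand-in for a bounded change-event queue: poll() returns a batch once
    // something is available and sleeps briefly while the queue is empty.
    public class SimpleChangeEventQueue<T> {
        private final BlockingQueue<T> queue = new LinkedBlockingQueue<>(8192); // stand-in for max.queue.size
        private final int maxBatchSize = 2048;                                  // stand-in for max.batch.size
        private final long pollIntervalMs = 500;                                // stand-in for poll.interval.ms

        // Producer side: the snapshot/streaming source enqueues events here.
        public void enqueue(T event) throws InterruptedException {
            queue.put(event);
        }

        // Consumer side: the task's poll loop ("checking for more records...").
        public List<T> poll() throws InterruptedException {
            List<T> batch = new ArrayList<>(maxBatchSize);
            while (batch.isEmpty()) {
                queue.drainTo(batch, maxBatchSize);
                if (batch.isEmpty()) {
                    // "no records available or batch size not reached yet, sleeping a bit..."
                    Thread.sleep(pollIntervalMs);
                }
            }
            return batch;
        }
    }

In a Debezium connector these bounds correspond to the max.batch.size, max.queue.size and poll.interval.ms settings.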
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,759 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,760 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,760 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:02,760 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:02,760 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,760 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
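The WorkerSourceTask lines just above ("About to send 0 records to Kafka", "Nothing to send to Kafka. Polling source for additional records") show the Connect framework calling the source task's poll() and receiving nothing. A minimal illustrative task demonstrating that contract is below; it is a generic example, not Debezium's connector code.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.connect.source.SourceRecord;
    import org.apache.kafka.connect.source.SourceTask;

    // Illustrative source task: the worker calls poll() in a loop; returning null tells it
    // there is nothing to send, and it simply polls again.
    public class IdleAwareSourceTask extends SourceTask {
        private final BlockingQueue<SourceRecord> queue = new LinkedBlockingQueue<>();

        @Override
        public String version() {
            return "0.0.1";
        }

        @Override
        public void start(Map<String, String> props) {
            // Start the background capture that feeds 'queue' (omitted).
        }

        @Override
        public List<SourceRecord> poll() throws InterruptedException {
            SourceRecord first = queue.poll(500, TimeUnit.MILLISECONDS);
            if (first == null) {
                return null; // no data -> worker logs "Nothing to send to Kafka..." and retries
            }
            List<SourceRecord> batch = new ArrayList<>();
            batch.add(first);
            queue.drainTo(batch);
            return batch;
        }

        @Override
        public void stop() {
            // Signal the background capture to shut down (omitted).
        }
    }

Returning null from poll() is the documented way for a source task to report that no data is currently available; the worker then logs the "Nothing to send" line and polls again.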
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:02,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2090, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=694) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386539 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,837 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2093, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=694, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,837 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386539 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,839 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=702, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,839 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,839 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,839 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,839 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=696) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,839 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,839 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,839 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147385514 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,839 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=703, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=696, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,839 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,839 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147385514 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2091, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
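The three internal consumers in this capture map onto the Connect worker's storage topics: naument--configs reads naument-config-0, naument--statuses reads naument-status-0..4, and naument--offsets reads naument-offset-0..24. A sketch of the corresponding distributed-worker settings follows, with topic names and partition counts read off those partition names; the replication factors are assumptions, not visible in the log.

    import java.util.Properties;

    public class WorkerStorageTopics {
        // Distributed-worker settings implied by the internal consumers in this log.
        public static Properties workerProps() {
            Properties p = new Properties();
            p.setProperty("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
            p.setProperty("group.id", "naument");
            p.setProperty("config.storage.topic", "naument-config");   // naument-config-0
            p.setProperty("status.storage.topic", "naument-status");   // naument-status-0..4
            p.setProperty("status.storage.partitions", "5");
            p.setProperty("offset.storage.topic", "naument-offset");   // naument-offset-0..24
            p.setProperty("offset.storage.partitions", "25");
            p.setProperty("config.storage.replication.factor", "3");   // assumption
            p.setProperty("status.storage.replication.factor", "3");   // assumption
            p.setProperty("offset.storage.replication.factor", "3");   // assumption
            return p;
        }
    }

Twenty-five offset partitions and five status partitions are also Connect's defaults, so those two values may simply reflect defaults rather than explicit configuration.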
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=695) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386519 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2094, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=695, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386519 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2082, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=691) for node 1. 
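Note: the naument--offsets, naument--configs and naument--statuses consumers in these entries are the worker's internal readers of its offset, config and status storage topics; each "Polling for fetches with timeout ..." line followed by a FETCH request is one pass of an ordinary consumer poll loop. As an illustration only (not the worker's actual code), a standalone consumer tailing the same status topic drives the same fetch machinery. The topic name naument-status is inferred from the partition names above, the broker address is copied from the log, and the group id is made up so the worker's own group "naument" is left untouched:

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class StatusTopicTail {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092");      // from the log
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-status-inspector");    // made-up group id
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-status"));
                while (true) {
                    // Each poll() builds incremental FETCH requests per partition leader and
                    // skips partitions whose previous request is still in flight, exactly the
                    // behaviour logged by AbstractFetch above.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    records.forEach(r -> System.out.printf("%s-%d@%d key=%s%n",
                            r.topic(), r.partition(), r.offset(),
                            r.key() == null ? "null" : new String(r.key())));
                }
            }
        }
    }

Run with client-level DEBUG enabled, such a loop produces essentially the same "Added READ_UNCOMMITTED fetch request ..." and "Built incremental fetch ..." sequence seen here.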
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,901 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472355 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,901 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2085, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=691, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472355 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2083, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=691) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472354 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:02,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2086, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=691, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:02,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472353 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,128 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2084, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,128 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,128 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,128 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
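Note: the FetchPosition entries record where the internal offsets consumer currently sits on the 25 partitions of the offset storage topic (offset=0 on empty partitions, 25 on naument-offset-13, 122 on naument-offset-18, and so on). To compare those positions against the log-end offsets, the Admin API can report the latest offset per partition. A small sketch, assuming the topic is named naument-offset as the partition names suggest and reusing a broker address from the log:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.OffsetSpec;
    import org.apache.kafka.common.TopicPartition;

    public class OffsetTopicEndOffsets {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092"); // from the log
            try (Admin admin = Admin.create(props)) {
                // The log shows partitions naument-offset-0 .. naument-offset-24.
                Map<TopicPartition, OffsetSpec> query = new HashMap<>();
                for (int p = 0; p < 25; p++) {
                    query.put(new TopicPartition("naument-offset", p), OffsetSpec.latest());
                }
                admin.listOffsets(query).all().get().forEach((tp, info) ->
                        System.out.printf("%s: end offset=%d, leader epoch=%s%n",
                                tp, info.offset(), info.leaderEpoch()));
            }
        }
    }

Substituting OffsetSpec.earliest() shows where retention starts on each partition instead.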
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=692) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,129 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472128 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,129 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2087, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=692, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,129 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147472127 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,239 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=191, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,239 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,239 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,239 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,239 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=148) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,239 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,239 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,239 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1449 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,239 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=192, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=148, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,239 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,239 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1449 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,249 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2092, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,249 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,249 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=693) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,249 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386127 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,249 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2095, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=693, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,249 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,249 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386127 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,260 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:03,260 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:03,260 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:03,261 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:03,261 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:03,261 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:03,261 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:03,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2093, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=695) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386038 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,338 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2096, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=695, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,338 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386038 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=703, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,339 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,340 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,340 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=697) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,340 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,340 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,340 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147385014 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,340 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=704, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=697, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,340 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,340 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147385013 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2094, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=696) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386018 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2097, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=696, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147386018 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2085, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=692) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471854 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,402 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2088, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=692, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,402 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471854 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2086, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=692) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471852 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2089, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=692, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471852 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,625 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:03,627 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:03,627 TRACE Oracle|vk_nau27|streaming Current time 1692961023627 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:03,627 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:03,628 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 2800. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:03,628 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599179 as end SCN. 
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:03,628 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:03,629 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2087, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,629 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,629 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,629 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599180' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:03,630 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=693) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,630 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471627 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,630 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2090, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=693, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,630 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471626 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,634 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:03,636 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599162, endScn=289931599179, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:03,636 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599163', endScn => '289931599179', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:03,645 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599162, 289931599179] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:03,713 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:03,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:03,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=131, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,713 DEBUG || 
[Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=131, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,713 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:03,720 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:03,720 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599167, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='12000400ec4c2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bcec.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:03,720 TRACE Oracle|vk_nau27|streaming Transaction 12000400ec4c2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:03,720 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599170, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:00Z, transactionId='01000e0020bb2500', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bcf2.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:03,720 TRACE Oracle|vk_nau27|streaming Transaction 01000e0020bb2500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:03,721 DEBUG Oracle|vk_nau27|streaming Counters{rows=5, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=2, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:03,721 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 3721. 
Offset SCN: 289931599162, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599170, txIds=[01000e0020bb2500]}}], Active Transactions: 0, Sleep: 2800 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:03,721 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:03,723 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:03,725 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:03,727 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:03,729 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:03,740 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=192, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,740 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,740 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,740 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,740 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=149) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,740 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,740 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,740 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 948 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,740 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=193, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=149, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,740 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,740 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 948 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,750 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2095, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,750 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,750 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,750 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,750 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,750 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,750 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,750 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,750 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=694) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,750 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,750 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,750 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385626 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,750 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2098, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=694, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,750 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,750 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,750 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
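Note on the fetch parameters: the FETCH requests in the entries above are sent with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED). As an illustrative sketch only, not Connect's internal code, the Java consumer below sets the client options that map onto those request fields; the broker address and the naument-status topic come from this log, while the group id and the rest of the setup are placeholders.

import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchTuningExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker address as it appears in the log; adjust for your environment.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092");
        // Hypothetical group id; do not reuse Connect's internal "naument" group.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-inspector");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // These four settings correspond to the FetchRequestData fields seen in the log:
        // maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0 (READ_UNCOMMITTED).
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Topic name taken from the naument-status-* partitions in the log.
            consumer.subscribe(List.of("naument-status"));
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            System.out.printf("fetched %d records%n", records.count());
        }
    }
}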
2023-08-25 13:57:03,750 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:03,750 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:03,750 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385626 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:03,761 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:03,761 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[... the preceding pair of ChangeEventQueue DEBUG records repeats verbatim, all with the same 13:57:03,761 timestamp, several dozen more times in the original capture; the duplicate repetitions are omitted ...]
2023-08-25 13:57:03,762 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:03,762 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:03,762 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:03,762 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:03,762 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:03,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2096, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,839 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,839 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,839 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=696) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,839 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385537 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,839 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2099, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=696, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385536 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=704, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=698) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,841 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147384512 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=705, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=698, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,841 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,841 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147384512 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2097, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=697) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385518 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2100, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=697, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385518 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2088, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,902 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=693) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471354 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,903 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2091, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=693, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,903 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471353 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2089, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=693) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471352 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:03,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2092, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=693, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:03,905 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471352 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2090, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=694) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,130 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471126 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,131 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2093, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=694, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,131 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147471125 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,241 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=193, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,241 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,241 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,241 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,241 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=150) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,241 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,241 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,241 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 447 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,241 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=194, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=150, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,241 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,241 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 447 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,251 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2098, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,251 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,251 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=695) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,251 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385125 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,251 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2101, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=695, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,251 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,251 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385125 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
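The two DEBUG messages repeated above come from Debezium's ChangeEventQueue: the connector task keeps checking its in-memory queue and, finding nothing buffered (or less than a full batch), sleeps briefly before checking again. The following is a minimal, hypothetical sketch of that poll-and-wait pattern, not Debezium's actual ChangeEventQueue implementation; maxBatchSize and pollSleepMs stand in for the connector's max.batch.size and poll.interval.ms settings.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

// Illustrative poll-and-wait loop behind the repeated pair
// "checking for more records..." / "no records available or batch size
// not reached yet, sleeping a bit..." (simplified; not the real class).
public class ChangeEventQueueSketch<T> {
    private final Queue<T> queue = new ArrayDeque<>();
    private final int maxBatchSize;    // assumed analogue of max.batch.size
    private final long pollSleepMs;    // assumed analogue of poll.interval.ms

    public ChangeEventQueueSketch(int maxBatchSize, long pollSleepMs) {
        this.maxBatchSize = maxBatchSize;
        this.pollSleepMs = pollSleepMs;
    }

    public synchronized void enqueue(T record) {
        queue.add(record);
        notifyAll();
    }

    // Returns a batch, waiting while nothing is buffered. The real queue also
    // honours a timeout before handing back a partial batch; that detail is omitted.
    public synchronized List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        while (batch.isEmpty()) {
            // "checking for more records..."
            while (!queue.isEmpty() && batch.size() < maxBatchSize) {
                batch.add(queue.remove());
            }
            if (batch.isEmpty()) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                wait(pollSleepMs);
            }
        }
        return batch;
    }
}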
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,262 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,263 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,263 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:04,263 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:04,263 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,263 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2099, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
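The WorkerSourceTask lines at 13:57:04,263 show the Connect worker's side of the same idle loop: the task's poll() returned nothing ("About to send 0 records to Kafka"), so the worker immediately polls the source again. A minimal stand-in SourceTask that behaves this way (returning null when idle) could look like the sketch below; it is illustrative only and not the Debezium Oracle task.

import java.util.List;
import java.util.Map;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

// Minimal SourceTask whose poll() reports "nothing to send", matching the
// worker TRACE lines above. Returning null (or an empty list) is legal and
// simply makes the worker call poll() again.
public class IdleSourceTaskSketch extends SourceTask {
    @Override
    public String version() {
        return "0.0-sketch";
    }

    @Override
    public void start(Map<String, String> props) {
        // a real task would open its change stream here
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        Thread.sleep(100); // avoid a hot loop while there is nothing to emit
        return null;       // -> "Nothing to send to Kafka. Polling source for additional records"
    }

    @Override
    public void stop() {
        // nothing to clean up in this sketch
    }
}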
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=697) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385036 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,340 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2102, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=697, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,340 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385036 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,341 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=705, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, 
responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,341 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,341 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,341 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,341 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=699) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,341 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,341 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,341 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147384012 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,341 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=706, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=699, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,341 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,341 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147384012 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2100, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) 
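The FETCH requests logged above (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, i.e. READ_UNCOMMITTED) carry the consumer's fetch settings; those values are the Kafka client defaults, so the internal naument--statuses/--configs/--offsets consumers appear to be running with an unmodified fetch configuration. As a sketch, the equivalent explicit client properties would be:

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;

// Explicit form of the fetch parameters visible in FetchRequestData above.
// These are the client defaults; shown here only to map log fields to config keys.
public class FetchConfigSketch {
    public static Properties fetchDefaults() {
        Properties props = new Properties();
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");            // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");         // maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0
        return props;
    }
}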
[org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=698) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385018 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2103, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=698, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147385018 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2091, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=694) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470853 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,403 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2094, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=694, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,403 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470853 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2092, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
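The many TRACE lines of the form "Skipping fetch for partition ... because previous request to <broker> has not been processed" reflect that the consumer keeps at most one in-flight fetch per broker: while a node has a pending request, every partition it leads is skipped, and fetching resumes once the response is handled ("Removing pending request for node ..."). A generic sketch of that per-node gating, not Kafka's AbstractFetch code, follows:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative per-node in-flight gate: one outstanding fetch per broker node.
public class PerNodeFetchGate {
    private final Set<Integer> nodesWithPendingRequest = ConcurrentHashMap.newKeySet();

    /** True if no fetch is outstanding for this node; marks one as pending. */
    public boolean tryAcquire(int nodeId) {
        return nodesWithPendingRequest.add(nodeId);   // "Adding pending request for node ..."
    }

    /** Called after the node's fetch response has been processed. */
    public void release(int nodeId) {
        nodesWithPendingRequest.remove(nodeId);       // "Removing pending request for node ..."
    }
}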
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,404 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=694) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470852 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2095, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=694, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470851 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2093, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=695) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470625 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,631 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2096, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=695, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,631 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
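The FETCH requests logged by NetworkClient above carry maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED). These map to the consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level, and all four values are the client defaults. The following is only an illustrative sketch of a consumer configured with those values (broker list, group id and client id are copied from the log entries above; nothing here is the worker's actual code):

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Brokers, group and client id as they appear in the log entries above.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets");
        // These four correspond to the FetchRequestData fields in the log:
        // maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0 (READ_UNCOMMITTED).
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // subscribe()/assign() and poll() as usual; omitted here.
        }
    }
}

The sessionId/sessionEpoch fields and the empty topics=[] list belong to the incremental fetch protocol (KIP-227): once a session is established, each request only confirms the partitions already registered with the broker, which the client reports as implied=(...) in the FetchSessionHandler entries.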
2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,632 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470625 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,689 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:04,689 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:04,689 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=195, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,689 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,689 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 
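In the heartbeat entries above the coordinator is reported as node 2147483646 even though the cluster only has broker ids 1-3. That is expected: the consumer talks to the group coordinator over a dedicated connection whose id is Integer.MAX_VALUE minus the broker id, so 2147483646 resolves back to broker 1, i.e. broker1:29092. A tiny sketch of that mapping (variable names are illustrative):

public class CoordinatorIdSketch {
    public static void main(String[] args) {
        // "coordinator broker1:29092 (id: 2147483646 rack: null)" from the log above.
        int loggedCoordinatorId = 2147483646;
        // The client derives the coordinator connection id as Integer.MAX_VALUE - brokerId,
        // so reversing the subtraction recovers the real broker id.
        int brokerId = Integer.MAX_VALUE - loggedCoordinatorId;
        System.out.println("coordinator is broker " + brokerId); // prints 1, i.e. broker1:29092
    }
}

The HEARTBEAT response with errorCode=0 that follows simply confirms that member connector-consumer-vk_nau27_sink-0 is still accepted in generation 1 of group connect-vk_nau27_sink.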
2023-08-25 13:57:04,689 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=195, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,689 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:04,689 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,689 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,689 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,689 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,742 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=194, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,742 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,742 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,742 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,742 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=151) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,742 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,742 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,742 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2947 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,742 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=196, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=151, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,742 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,742 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2947 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2101, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=696) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384624 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2104, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=696, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
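The consumers with client ids naument--offsets, naument--statuses and naument--configs are the Connect worker's readers for its internal storage topics (here named naument-offset with 25 partitions, naument-status with 5 and naument-config with 1), which is why their fetches fan out across brokers 1-3: each partition is fetched from its current leader, as the FetchPosition/LeaderAndEpoch entries show. A hedged Admin-client sketch for checking those partition-to-leader assignments (topic names and broker list are taken from the log, everything else is assumed; written against a 3.x client):

import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;

public class DescribeConnectTopicsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        try (Admin admin = Admin.create(props)) {
            Map<String, TopicDescription> topics = admin
                    .describeTopics(Arrays.asList("naument-offset", "naument-status", "naument-config"))
                    .allTopicNames()
                    .get();
            // Print each partition with its current leader,
            // e.g. "naument-status-1 leader=broker3:29092 (id: 3 rack: null)".
            topics.forEach((name, description) -> description.partitions().forEach(p ->
                    System.out.printf("%s-%d leader=%s%n", name, p.partition(), p.leader())));
        }
    }
}

The frequent "Skipping fetch for partition ... because previous request ... has not been processed" lines are normal at TRACE level: the fetcher keeps at most one in-flight fetch per broker, so partitions whose leader already has an outstanding request are simply re-checked on the next poll.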
2023-08-25 13:57:04,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384624 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,763 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,764 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,764 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:04,764 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:04,764 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,764 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
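The run of "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." pairs comes from Debezium's ChangeEventQueue: the Oracle streaming side has nothing buffered, so each poll of the source task yields an empty batch and the worker immediately polls again ("About to send 0 records to Kafka", "Nothing to send to Kafka. Polling source for additional records"). The sketch below shows the general queue-draining pattern on the Connect SourceTask side; it is illustrative only, not Debezium's implementation, and the class name and constants are made up (max.batch.size is the analogous Debezium setting):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

// Minimal sketch (not Debezium's code) of a task that drains an in-memory queue
// and returns null when nothing arrived, so the worker keeps polling.
public class QueueBackedSourceTask extends SourceTask {
    private final BlockingQueue<SourceRecord> queue = new LinkedBlockingQueue<>(8192);
    private static final int MAX_BATCH_SIZE = 2048; // analogous to Debezium's max.batch.size

    @Override public String version() { return "sketch"; }
    @Override public void start(Map<String, String> props) { /* wire up the change stream here */ }
    @Override public void stop() { }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        // Wait briefly for the first record, then drain whatever else is already buffered.
        SourceRecord first = queue.poll(500, TimeUnit.MILLISECONDS);
        if (first == null) {
            return null; // nothing buffered -> the worker goes straight back to polling
        }
        List<SourceRecord> batch = new ArrayList<>(MAX_BATCH_SIZE);
        batch.add(first);
        queue.drainTo(batch, MAX_BATCH_SIZE - 1);
        return batch;
    }
}

When poll() returns null or an empty list, the worker has nothing to dispatch and polls the task again, which matches the alternating WorkerSourceTask and ChangeEventQueue entries above.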
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:04,840 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2102, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,840 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,840 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,840 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,840 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,840 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=698) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384536 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2105, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=698, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384535 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=706, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=700) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,841 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147383512 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,841 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=707, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=700, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,842 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,842 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147383511 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2103, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,858 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=699) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,858 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384518 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2106, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=699, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384517 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2094, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=695) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470352 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,904 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2097, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=695, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,904 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470352 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,905 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2095, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=695) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470350 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:04,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2098, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=695, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:04,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470350 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,095 TRACE || com.mchange.v2.async.ThreadPoolAsynchronousRunner$DeadlockDetector@26cb9b7a -- Running DeadlockDetector[Exiting. No pending tasks.] 
[com.mchange.v2.async.ThreadPoolAsynchronousRunner] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2096, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=696) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470124 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,132 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2099, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=696, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,132 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,133 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147470124 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,243 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=196, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,243 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,243 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,243 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,243 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=152) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,243 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,243 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,243 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2446 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,243 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=197, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=152, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,243 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,243 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2446 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,252 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2104, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,252 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,252 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=697) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,252 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384124 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,252 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2107, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=697, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,252 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
2023-08-25 13:57:05,252 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,252 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384124 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:05,264 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,264 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,264 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,264 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
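The ChangeEventQueue DEBUG pair above repeats at the same millisecond because the Debezium task's poll loop keeps checking an in-memory queue and, finding nothing buffered, backs off briefly before checking again. The snippet below is not Debezium's implementation, only a minimal illustration of that pattern with a plain blocking queue; the capacity, batch size, and sleep interval are invented for the example.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class ChangeEventQueueSketch {
    private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(8192); // hypothetical capacity
    private final int maxBatchSize = 2048;                                      // hypothetical batch size

    /** Drain up to maxBatchSize buffered events; sleep briefly while nothing is available. */
    public List<String> poll() throws InterruptedException {
        List<String> batch = new ArrayList<>(maxBatchSize);
        while (batch.isEmpty()) {
            // corresponds to "checking for more records..." in the log
            queue.drainTo(batch, maxBatchSize);
            if (batch.isEmpty()) {
                // corresponds to "no records available or batch size not reached yet, sleeping a bit..."
                TimeUnit.MILLISECONDS.sleep(100);
            }
        }
        return batch;
    }

    /** Producer side: the streaming source hands change events to the queue here. */
    public void enqueue(String event) throws InterruptedException {
        queue.put(event);
    }
}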
2023-08-25 13:57:05,265 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:05,265 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:05,265 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,265 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2105, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092
(id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=699) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384035 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,341 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2108, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=699, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,341 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384035 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,342 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=707, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,342 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,343 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,343 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,343 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=701) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,343 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,343 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,343 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147383011 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,343 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=708, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=701, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,343 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,343 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147383010 
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2106, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=700) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384016 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2109, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=700, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147384016 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2097, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
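The offsets consumer above tracks 25 partitions of the naument-offset topic (naument-offset-0 through naument-offset-24), each at a known FetchPosition (for example offset 0 for naument-offset-7 and offset 25 for naument-offset-19). To compare those positions against the current log-end offsets on the brokers, one option is AdminClient's listOffsets call; the sketch below assumes direct broker access and uses an illustrative bootstrap endpoint, so treat it as a diagnostic aid rather than part of the worker.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class OffsetTopicEndOffsetsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092"); // illustrative endpoint
        try (Admin admin = Admin.create(props)) {
            // The log shows partitions naument-offset-0 .. naument-offset-24, i.e. 25 partitions.
            Map<TopicPartition, OffsetSpec> request = new HashMap<>();
            for (int p = 0; p < 25; p++) {
                request.put(new TopicPartition("naument-offset", p), OffsetSpec.latest());
            }
            Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> endOffsets =
                    admin.listOffsets(request).all().get();
            endOffsets.forEach((tp, info) ->
                    System.out.printf("%s log-end offset = %d%n", tp, info.offset()));
        }
    }
}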
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=696) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,405 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469851 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,406 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2100, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=696, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,406 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469850 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2098, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=696) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469849 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2101, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=696, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469849 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2099, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=697) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469623 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,633 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2102, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=697, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
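The FETCH requests above all carry maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0, which are simply the standard consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and read_uncommitted surfacing in FetchRequestData. The following is a minimal sketch (not the Connect worker's actual construction of its naument--offsets/naument--statuses consumers; bootstrap servers, client id and group id are assumptions for illustration) showing how a plain consumer with the same settings would produce the same request fields and drive the same incremental fetch sessions when polled:

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetsTopicTailer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092"); // assumption
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets-tail");                              // assumption
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-debug");                                       // assumption
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // These three settings are what appears as maxWaitMs/minBytes/maxBytes in FetchRequestData.
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
        // isolationLevel=0 in the request corresponds to read_uncommitted.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-offset"));
            while (true) {
                // Each poll() drives the incremental FETCH sessions seen in the log; a partition whose
                // leader already has an in-flight request is skipped until that response comes back.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
            }
        }
    }
}

This also explains the empty toSend/toForget/toReplace sets in the IncrementalFetchRequest lines: with an established session, the consumer only re-sends changes, and the unchanged partitions are carried as "implied" by the session epoch.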
2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,633 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,634 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469623 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,744 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=197, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,744 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,744 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,744 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,744 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=153) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,744 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,744 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,744 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1945 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,744 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=198, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=153, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,744 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,744 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1945 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2107, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=698) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383624 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,752 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2110, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=698, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,752 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383624 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,765 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,765 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[... the preceding two ChangeEventQueue messages ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") repeat verbatim many more times at 13:57:05,765; the duplicate entries are omitted ...]
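These ChangeEventQueue lines, together with the WorkerSourceTask{id=vk_nau27_src-0} entries a little further down ("About to send 0 records to Kafka" / "Nothing to send to Kafka. Polling source for additional records"), show a source connector idling: the framework keeps calling the task's poll() and the task finds nothing buffered. Below is a rough, hypothetical sketch of that contract; it mimics the queue-backed polling pattern only, not Debezium's actual ChangeEventQueue implementation, and the batch size and wait time are assumptions.

```java
// Hypothetical illustration of a queue-backed SourceTask poll loop (not Debezium's real classes).
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

public class QueueBackedSourceTask extends SourceTask {
    // Buffer filled by a separate capture thread (not shown), analogous to a change-event queue.
    private final BlockingQueue<SourceRecord> queue = new LinkedBlockingQueue<>(8192);
    private static final int MAX_BATCH_SIZE = 2048; // assumed batch limit
    private static final long POLL_WAIT_MS = 500;   // assumed wait before giving up on a batch

    @Override
    public String version() { return "sketch"; }

    @Override
    public void start(Map<String, String> props) {
        // A real connector would start its change-capture loop here.
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        // Wait briefly for a first record; if none arrives, report no data and let the
        // framework poll again ("no records available ... sleeping a bit").
        SourceRecord first = queue.poll(POLL_WAIT_MS, TimeUnit.MILLISECONDS);
        if (first == null) {
            return null;
        }
        List<SourceRecord> batch = new ArrayList<>();
        batch.add(first);
        queue.drainTo(batch, MAX_BATCH_SIZE - 1); // take whatever else is already buffered
        return batch;
    }

    @Override
    public void stop() { }
}
```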
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,765 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,765 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,765 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,766 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,766 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:05,766 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:05,766 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,766 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:05,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2108, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,841 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,841 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) 
has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=700) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383535 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,842 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2111, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=700, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,842 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383534 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,844 DEBUG 
|| [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=708, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,844 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,844 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,844 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,844 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=702) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,844 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,844 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,844 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147382509 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,844 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=709, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=702, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,844 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,844 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147382509 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2109, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, 
sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=701) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383516 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2112, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=701, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383516 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2100, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=697) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469350 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,906 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2103, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=697, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,906 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469350 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2101, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
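The FetchPosition entries in this block record the naument--offsets consumer's position on each partition of the internal naument-offset topic (offset 0 for partitions that are still empty, 2 or 25 where connector offsets have been written). For ad-hoc inspection, such an internal topic can be read from the beginning with a stand-alone consumer using assign()/seekToBeginning(); the sketch below assumes the topic name taken from the partition names above and the default JSON converters for internal topics, so keys and values are printable as strings.

```java
// Illustrative sketch only: dump the "naument-offset" topic from the beginning for inspection.
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class OffsetTopicDump {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // read-only inspection, no group needed
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            List<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo p : consumer.partitionsFor("naument-offset")) {
                partitions.add(new TopicPartition(p.topic(), p.partition()));
            }
            consumer.assign(partitions);          // manual assignment, no consumer group rebalancing
            consumer.seekToBeginning(partitions); // start at offset 0 on every partition

            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<String, String> r : records) {
                // Key identifies the connector/source partition, value holds the stored source offset.
                System.out.printf("%s-%d@%d key=%s value=%s%n",
                        r.topic(), r.partition(), r.offset(), r.key(), r.value());
            }
        }
    }
}
```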
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=697) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469349 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:05,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2104, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=697, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:05,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469349 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2102, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=698) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469122 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,134 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2105, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=698, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,134 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
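The TRACE and DEBUG entries above all come from inside a single consumer poll loop: FetchSessionHandler keeps one incremental fetch session per broker (the sessionId/epoch pairs), AbstractFetch skips every partition whose leader node still has an in-flight request, and an empty FetchResponse simply re-implies the partitions of the previous request. As a rough illustration of the client side that produces this output, here is a minimal poll-loop sketch; the class name and the explicit partition assignment are hypothetical, while the broker addresses, group id, topic name and READ_UNCOMMITTED isolation level are taken from the log itself. None of the incremental-fetch bookkeeping shows up in user code; it all happens inside poll().

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;

    public class OffsetsTopicTail {                      // hypothetical class name
        public static void main(String[] args) {
            Properties props = new Properties();
            // Broker list, group id and isolation level as seen in the FETCH requests above.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                // Explicit assignment to two of the 25 naument-offset partitions from the log
                // (illustrative only; the worker's internal consumer reads all of them).
                consumer.assign(List.of(new TopicPartition("naument-offset", 0),
                                        new TopicPartition("naument-offset", 3)));
                while (true) {
                    // One poll() drives everything logged above: building an incremental
                    // FETCH per broker, skipping partitions whose node has a pending request,
                    // and handling the (possibly empty) FetchResponse when it arrives.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    records.forEach(r -> System.out.printf("%s-%d @ %d%n", r.topic(), r.partition(), r.offset()));
                }
            }
        }
    }

With org.apache.kafka.clients set to TRACE, a loop like this against the same brokers would emit essentially the same AbstractFetch and FetchSessionHandler output as the worker's naument--offsets consumer does here.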
2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,135 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147469122 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=198, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=154) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,245 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1444 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=199, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=154, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,245 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,245 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1444 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2110, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=699) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383123 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2113, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=699, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383123 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,266 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,267 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:06,267 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:06,267 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,267 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,342 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2111, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,342 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,342 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 
(id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=701) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,342 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383034 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,342 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2114, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=701, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,342 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383034 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,344 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=709, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,344 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,344 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,344 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,344 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=703) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,344 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,344 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,344 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147382009 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,344 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=710, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=703, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,344 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,344 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147382009 
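
The FETCH traffic above comes from Kafka Connect's internal consumers for the naument status, config and offset topics; the incremental fetch sessions, the READ_UNCOMMITTED isolation level and the "skipping fetch ... has not been processed" decisions are all made inside the Kafka consumer client itself, not by the connector. As a rough illustration only (broker addresses, group id and client id copied from the log lines above; the topic subscription and everything else assumed), a hand-written consumer producing the same kind of fetch traffic would look roughly like this:

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class NaumentStatusConsumerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Broker list, group id and client id taken from the log lines above.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
            props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--statuses");
            // Matches the READ_UNCOMMITTED fetch requests seen in the log.
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-status"));
                while (true) {
                    // poll() drives the FETCH requests that NetworkClient/AbstractFetch log above.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    for (ConsumerRecord<byte[], byte[]> record : records) {
                        System.out.printf("partition=%d offset=%d%n", record.partition(), record.offset());
                    }
                }
            }
        }
    }

Each poll() lets the client reuse its fetch session (the sessionId/epoch values in the log) and send only incremental FETCH requests, skipping any partition whose previous request to the same broker has not yet been answered.
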
[org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2112, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=702) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383016 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2115, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=702, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147383016 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2103, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=698) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,407 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468849 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2106, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=698, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2104, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 
13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,407 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position 
FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=698) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468849 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2107, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=698, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468848 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,529 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:06,531 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:06,531 TRACE Oracle|vk_nau27|streaming Current time 1692961026531 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:06,531 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:06,532 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 3000. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:06,532 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599191 as end SCN. 
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:06,532 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:06,533 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599192' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:06,538 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:06,540 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599170, endScn=289931599191, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:06,540 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599171', endScn => '289931599191', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:06,548 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599170, 289931599191] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:06,629 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:06,629 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599181, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:03Z, transactionId='0700160029c12d00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bcf8.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:06,629 TRACE Oracle|vk_nau27|streaming Transaction 0700160029c12d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:06,629 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599184, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:03Z, transactionId='09001c003dee2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bcff.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:06,629 TRACE Oracle|vk_nau27|streaming Transaction 09001c003dee2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:06,629 DEBUG Oracle|vk_nau27|streaming Counters{rows=5, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=2, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:06,629 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 3629. 
Offset SCN: 289931599170, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599184, txIds=[09001c003dee2f00]}}], Active Transactions: 0, Sleep: 3000 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:06,629 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:06,632 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:06,634 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2105, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
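The Oracle|vk_nau27|streaming entries above show one iteration of Debezium's LogMiner loop: the connector samples SYSTIMESTAMP and CURRENT_SCN, writes the new LAST_SCN into its LOG_MINING_FLUSH table, and then opens a bounded mining session with DBMS_LOGMNR.START_LOGMNR using the DICT_FROM_REDO_LOGS + DDL_DICT_TRACKING + NO_ROWID_IN_STMT options before fetching the SCN window [289931599170, 289931599191]. The following is a minimal JDBC sketch of those same statements for reproducing such a session by hand; it is not Debezium's implementation, and the connection URL, credentials and start SCN are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Minimal sketch of the LogMiner session seen in the log above.
// Requires the Oracle JDBC driver on the classpath; URL, user and password
// are placeholders, not values from this installation.
public class LogMinerSessionSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:oracle:thin:@//oracle-host:1521/ORCLPDB1", "debezium", "dbz");
             Statement stmt = conn.createStatement()) {

            // Same probe as 'SELECT CURRENT_SCN FROM V$DATABASE' in the log.
            long endScn;
            try (ResultSet rs = stmt.executeQuery("SELECT CURRENT_SCN FROM V$DATABASE")) {
                rs.next();
                endScn = rs.getLong(1);
            }

            // Debezium records the flushed SCN before mining ('UPDATE LOG_MINING_FLUSH ...').
            stmt.executeUpdate("UPDATE LOG_MINING_FLUSH SET LAST_SCN = " + endScn);

            // Placeholder start SCN; the connector uses the SCN stored in its offsets.
            long startScn = endScn - 20;

            // Bounded mining session with the same options as the logged start_logmnr call.
            stmt.execute(
                "BEGIN sys.dbms_logmnr.start_logmnr("
                    + "startScn => '" + startScn + "', "
                    + "endScn => '" + endScn + "', "
                    + "OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS"
                    + " + DBMS_LOGMNR.DDL_DICT_TRACKING"
                    + " + DBMS_LOGMNR.NO_ROWID_IN_STMT); END;");

            // ... query V$LOGMNR_CONTENTS for the SCN window here ...

            // Close the session when finished.
            stmt.execute("BEGIN sys.dbms_logmnr.end_logmnr; END;");
        }
    }
}

Querying V$LOGMNR_CONTENTS between those two SCNs returns the rows that the AbstractLogMinerEventProcessor counters above summarize for this window: 5 rows, 2 commits, no DML.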
2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=699) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468621 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,635 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2108, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=699, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,635 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,636 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468621 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,636 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:06,638 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:06,714 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:06,714 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:06,714 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=132, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,714 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=132, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,714 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 
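In the naument--offsets consumer lines above and below, the Connect framework's offset-topic consumer allows only one in-flight fetch per broker, which is why partitions whose broker already has an outstanding request are logged as "Skipping fetch ... has not been processed", while follow-up requests reuse an incremental fetch session (sessionId/sessionEpoch) in which unchanged partitions are merely "implied" rather than resent. The partition names naument-offset-0 through naument-offset-24 suggest this cluster's offset.storage.topic is naument-offset; under that assumption, a standalone consumer such as the sketch below (brokers taken from the log, topic name and partition assumed) can read the same records to inspect the source offsets the connectors have committed.

import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

// Sketch: peek at one partition of the (assumed) Connect offset storage topic.
public class OffsetsTopicPeek {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker addresses as they appear in the log.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // The framework consumer fetches READ_UNCOMMITTED (the default); mirrored here.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Assumed topic name; partition 17 is one of those fetched in the log.
            TopicPartition tp = new TopicPartition("naument-offset", 17);
            consumer.assign(List.of(tp));
            consumer.seekToBeginning(consumer.assignment());

            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<String, String> r : records) {
                // Keys name the connector/source partition; values hold the stored offset
                // (for the Debezium Oracle connector, SCN positions).
                System.out.printf("p%d@%d key=%s value=%s%n", r.partition(), r.offset(), r.key(), r.value());
            }
        }
    }
}

Using assign() with a throwaway client rather than subscribe() keeps this reader out of the "naument" consumer group, so it does not disturb the Connect workers whose heartbeats appear above.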
2023-08-25 13:57:06,745 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=199, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,745 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,745 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,745 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,745 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=155) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,745 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,745 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,745 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 944 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,745 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=200, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=155, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,745 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,745 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 944 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2113, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=700) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382623 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2116, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=700, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382623 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,767 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,768 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,768 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:06,768 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:06,768 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,768 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
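The repeated DEBUG pair in the entries above is the connector's event queue idling: the worker's source task keeps polling Debezium's ChangeEventQueue while the Oracle streaming thread has buffered nothing and the batch threshold is not reached, so each poll logs "checking for more records..." followed by "no records available or batch size not reached yet, sleeping a bit..." and a short sleep, and WorkerSourceTask correspondingly reports 0 records to send. The sketch below is only an illustrative approximation of that producer/consumer hand-off, not Debezium's actual ChangeEventQueue implementation; the queue capacity, batch size, and poll interval are assumed values.

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

// Illustrative sketch only, not Debezium's actual ChangeEventQueue implementation:
// a bounded queue whose consumer polls in a loop and sleeps briefly while no
// records are buffered, which is what the repeated "checking for more records..." /
// "no records available or batch size not reached yet, sleeping a bit..." pair traces.
public class PollLoopSketch {
    private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(8192); // assumed queue capacity
    private final int maxBatchSize = 2048;                                      // assumed max batch size
    private final Duration pollInterval = Duration.ofMillis(500);               // assumed poll interval

    // Producer side: the streaming thread enqueues change events as it reads them.
    public void enqueue(String changeEvent) throws InterruptedException {
        queue.put(changeEvent);
    }

    // Consumer side: the source task drains up to maxBatchSize records,
    // sleeping for pollInterval whenever nothing is available yet.
    public List<String> poll() throws InterruptedException {
        List<String> batch = new ArrayList<>(maxBatchSize);
        while (batch.isEmpty()) {
            // "checking for more records..."
            queue.drainTo(batch, maxBatchSize);
            if (batch.isEmpty()) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                Thread.sleep(pollInterval.toMillis());
            }
        }
        return batch;
    }
}

Because these messages come from the io.debezium.connector.base.ChangeEventQueue logger at DEBUG level, raising that category to INFO in the worker's logging configuration would silence the idle chatter without changing connector behavior.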
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:06,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2114, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=702) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382533 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,843 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2117, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=702, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,843 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382533 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,845 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=710, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,845 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,845 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,845 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,845 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=704) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,845 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,845 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,845 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147381508 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,845 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=711, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=704, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,845 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,845 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147381508 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2115, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
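The naument--statuses and naument--configs clients in these entries (and the naument--offsets client further down) are the Connect worker's internal consumers for its status, config, and offset topics; the partition names naument-status-N, naument-config-0, and naument-offset-N point at those topics. The FETCH requests they send carry maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0, i.e. READ_UNCOMMITTED. For reference, the sketch below configures a standalone consumer with the equivalent client settings; the group id, the subscription to naument-status, and the byte-array deserializers are assumptions for illustration, not the worker's own configuration.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Standalone consumer with fetch settings equivalent to those visible in the
// FetchRequestData entries above. Group id, topic subscription and deserializers
// are assumptions for illustration, not the Connect worker's own configuration.
public class StatusTopicTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-log-inspection");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);              // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                  // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);           // maxBytes=52428800 (50 MiB)
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status")); // topic name inferred from the naument-status-N partitions
            while (true) {
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    System.out.printf("partition=%d offset=%d%n", record.partition(), record.offset());
                }
            }
        }
    }
}

With fetch.min.bytes=1 and fetch.max.wait.ms=500 a broker answers roughly every half second even when a partition is idle, which is why the empty responses=[] FETCH responses above recur so steadily.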
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=703) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382515 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2118, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=703, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382515 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:57:06,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:57:06,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2106, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,907 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,907 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 DEBUG || [Consumer 
clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous 
request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=699) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,908 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468349 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,908 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2109, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=699, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
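Each "Built incremental fetch (sessionId=..., epoch=...)" line followed by "Node N sent an incremental fetch response ... implied=(...)" shows the consumer reusing a fetch session: only partitions whose state changed are re-sent, the unchanged ones stay implied, and a partition whose leader already has an in-flight request is skipped, which is what the many "Skipping fetch ... has not been processed" TRACE lines record. The FetchPosition entries also expose the exact read positions, for example offset 25 on naument-offset-19. To look at one of those positions directly, a throwaway consumer can be pinned to the same partition and offset; in the sketch below the byte-array deserializers, the disabled auto-commit, and the chosen partition and offset are assumptions taken from the log above, not part of the worker itself.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Read-only peek at one partition/offset of the Connect offset topic, pinned to a
// position taken from the FetchPosition entries above (offset 25 on naument-offset-19).
public class OffsetTopicPeek {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // no group management, no commits

        TopicPartition tp = new TopicPartition("naument-offset", 19);
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.assign(List.of(tp));   // manual assignment, no consumer group rebalance
            consumer.seek(tp, 25L);         // position observed in the log
            for (ConsumerRecord<byte[], byte[]> record : consumer.poll(Duration.ofSeconds(2))) {
                System.out.printf("%s offset=%d key=%s%n", tp, record.offset(),
                        record.key() == null ? "null" : new String(record.key()));
            }
        }
    }
}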
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,908 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468348 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2107, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=699) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468347 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:06,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2110, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=699, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:06,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468347 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2108, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=700) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,136 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468120 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,136 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2111, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=700, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,137 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147468119 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=200, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=156) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,245 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 444 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,245 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=201, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=156, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,245 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,245 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 444 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2116, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=701) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382123 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,253 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2119, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=701, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,253 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382123 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,268 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:07,268 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] [... the preceding pair of ChangeEventQueue DEBUG messages ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") repeats verbatim many more times at 13:57:07,268; duplicate entries omitted ...] 2023-08-25 13:57:07,269 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:07,269 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:07,269 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:07,269 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:07,269 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:07,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2117, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=703) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382032 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,344 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2120, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=703, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,344 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382032 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,345 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=711, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,345 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,345 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,345 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,345 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=705) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,345 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,345 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,345 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147381008 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,345 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=712, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=705, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,345 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,346 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147381008 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2118, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=704) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382015 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2121, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=704, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147382014 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2109, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=700) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,408 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467848 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,408 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2112, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=700, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467847 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,409 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2110, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,409 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,409 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,409 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=700) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467847 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2113, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=700, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467846 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2111, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=701) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467619 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,637 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2114, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=701, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,637 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,638 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467619 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,690 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:07,690 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:07,690 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=202, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,690 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition 
vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,690 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,690 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=202, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,690 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:07,690 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,690 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,690 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,690 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=201, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=157) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,746 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2944 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=203, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=157, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,746 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,746 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2944 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2119, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: 
null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=702) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381623 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,753 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2122, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=702, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,753 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381623 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,769 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:07,769 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[the previous two ChangeEventQueue entries repeat verbatim many more times at 13:57:07,769]
2023-08-25 13:57:07,770 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:07,770 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:07,770 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:07,770 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:07,770 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:07,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2120, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,844 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,844 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=704) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381532 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2123, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=704, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381531 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,846 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=712, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,846 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,846 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,846 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,846 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=706) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,846 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,846 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,846 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147380507 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,846 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=713, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=706, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,846 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,846 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147380507 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,863 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2121, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,863 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,863 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=705) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,863 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381513 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,863 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2124, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=705, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,863 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381513 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2112, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=701) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467347 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,909 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2115, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=701, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,909 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467347 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2113, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=701) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467346 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:07,910 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2116, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=701, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:07,910 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467346 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2114, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=702) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467118 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,138 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2117, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=702, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,138 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,139 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147467118 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,246 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=203, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,246 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,246 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,246 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,246 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=158) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,246 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,246 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,246 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2444 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,246 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=204, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=158, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,246 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,246 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2444 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,254 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2122, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,254 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,254 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=703) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,254 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381122 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,254 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2125, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=703, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,254 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,254 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381122 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,270 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,271 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:08,271 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:08,271 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,271 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2123, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=705) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381031 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,345 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2126, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=705, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,345 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381031 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,346 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=713, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,346 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,346 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,346 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,346 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=707) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,346 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,346 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,346 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147380007 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,346 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=714, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=707, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,347 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,347 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147380006 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2124, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,363 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,363 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,363 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,363 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=706) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,363 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381013 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2127, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=706, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147381012 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2115, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=702) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,410 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466846 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,410 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2118, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=702, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466845 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2116, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=702) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466845 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,411 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2119, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=702, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,411 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466845 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2117, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=703) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,639 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466617 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,639 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2120, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=703, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,640 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466616 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=204, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=159) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,746 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1944 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,746 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=205, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=159, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,746 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,746 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1944 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,754 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2125, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,754 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,754 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=704) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,754 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380622 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,754 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2128, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=704, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,754 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,754 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380622 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
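
The consumer entries above (clientId=naument--offsets, naument--statuses and connector-consumer-vk_nau27_sink-0) are all emitted from inside KafkaConsumer.poll(): AbstractFetch skips every partition whose leader already has a FETCH in flight, and FetchSessionHandler keeps reusing the incremental fetch session (topics=[] stays empty, only sessionId and sessionEpoch advance) until that broker's response comes back. A minimal stand-alone consumer that produces the same kind of fetch traffic might look like the sketch below; the subscribed topic name naument-offset is inferred from the partition labels and the bootstrap list from the broker names in the log, so treat both as assumptions rather than the worker's actual configuration.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class FetchTrafficSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Broker addresses and ids copied from the log; adjust as needed.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
            props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets");
            // read_uncommitted is the default isolation level; it matches the
            // "Added READ_UNCOMMITTED fetch request" lines above.
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-offset")); // assumed topic name, taken from the partition labels
                while (true) {
                    // Each poll() drives the fetch machinery: at most one in-flight FETCH
                    // per broker, incremental fetch sessions reused across polls, and
                    // partitions whose leader is still busy are skipped (the TRACE lines).
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    records.forEach(record -> {
                        // process record.key() / record.value() here
                    });
                }
            }
        }
    }
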
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,771 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,772 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
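Annotation: the burst above is Debezium's ChangeEventQueue poll loop. The connector task's poll() (driven by WorkerSourceTask, as the next entries show) repeatedly asks the queue for a batch, and while the queue is empty or the batch has not filled before the poll interval elapses, the loop logs that it is sleeping and checks again. Debezium's real class also handles metrics, throttling and error propagation, so the following is only a minimal, hypothetical Java sketch of that wait-for-batch-or-timeout pattern; the class and field names are illustrative, not Debezium's API.

```java
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

/** Illustrative stand-in for a bounded change-event queue (not Debezium's actual class). */
public class SimpleChangeEventQueue<T> {
    private final BlockingQueue<T> queue;
    private final int maxBatchSize;
    private final Duration pollInterval;

    public SimpleChangeEventQueue(int maxQueueSize, int maxBatchSize, Duration pollInterval) {
        this.queue = new ArrayBlockingQueue<>(maxQueueSize);
        this.maxBatchSize = maxBatchSize;
        this.pollInterval = pollInterval;
    }

    /** Producer side: the streaming thread enqueues captured change events. */
    public void enqueue(T event) throws InterruptedException {
        queue.put(event); // blocks when the queue is full (back-pressure)
    }

    /**
     * Consumer side: drain up to maxBatchSize events, sleeping briefly while nothing
     * is available -- the behaviour behind the repeated "checking for more records..." /
     * "no records available or batch size not reached yet, sleeping a bit..." lines.
     */
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.nanoTime() + pollInterval.toNanos();
        while (batch.size() < maxBatchSize && System.nanoTime() < deadline) {
            // "checking for more records..."
            T event = queue.poll();
            if (event != null) {
                batch.add(event);
            } else if (batch.isEmpty()) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                Thread.sleep(Math.min(50, pollInterval.toMillis()));
            } else {
                break; // return a partial batch rather than waiting out the full interval
            }
        }
        return batch; // may be empty, in which case the worker simply polls again
    }
}
```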
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,772 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:08,772 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:08,772 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,772 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:08,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2126, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,845 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,845 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,846 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=706) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,846 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,846 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380531 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,846 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2129, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=706, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380530 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=714, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=708) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,847 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147379506 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=715, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=708, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,847 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,847 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147379506 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2127, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
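Annotation: the naument--statuses and naument--configs clients above are the worker's internal consumers for the connect status and config topics. The FETCH traffic they log (a session id plus epoch, an empty topics list with "implied" partitions, and "Skipping fetch ... has not been processed") is the incremental fetch session protocol (KIP-227): the consumer keeps at most one in-flight FETCH per broker and only sends deltas against the session it already holds with that node, all inside the client's fetcher. None of this needs application code; this is not Connect's own code path, but a plain consumer configured with the names taken from the log would emit the same kind of fetcher-level DEBUG/TRACE lines once the org.apache.kafka.clients loggers are raised to TRACE. A minimal sketch under those assumptions:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class StatusTopicTailer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker addresses and client/group names as they appear in the log;
        // a throwaway group id would be safer against a real cluster.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--statuses");
        // read_uncommitted is the default; shown explicitly because the log says READ_UNCOMMITTED.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status"));
            while (true) {
                // Each poll() drives the fetcher that produces the "Added READ_UNCOMMITTED fetch request",
                // "Built incremental fetch (sessionId=..., epoch=...)" and "Skipping fetch for partition ..."
                // lines seen above; the per-broker fetch sessions are maintained internally.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                records.forEach(r -> System.out.printf("%s-%d@%d (%d bytes)%n",
                        r.topic(), r.partition(), r.offset(), r.value() == null ? 0 : r.value().length));
            }
        }
    }
}
```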
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=707) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380512 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2130, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=707, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380512 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2118, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=703) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466345 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2121, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=703, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466345 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2119, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,911 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=703) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:08,912 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466345 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:08,912 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2122, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=703, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:08,912 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466344 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,140 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2120, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,140 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,140 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,140 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=704) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,141 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466116 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,141 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2123, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=704, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,141 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147466115 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,247 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=205, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,247 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,247 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,247 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,247 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=160) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,247 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,247 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,247 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1443 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,247 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=206, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=160, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,247 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,247 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1443 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,255 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2128, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,255 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,255 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,255 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,255 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,255 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,255 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,255 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,255 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=705) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,255 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,255 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,255 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380121 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,255 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2131, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=705, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,255 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,255 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,255 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,255 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,255 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,255 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380121 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:09,272 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:09,272 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:09,272 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:09,272 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:09,272 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:09,272 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:09,273 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:09,273 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:09,273 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:09,273 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:09,273 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:09,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2129, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=707) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380030 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,346 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2132, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=707, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,346 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380030 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=715, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=709) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,347 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147379006 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=716, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=709, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,347 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,347 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147379006 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2130, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=708) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380012 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2133, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=708, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147380012 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2122, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2121, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED 
fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] 
Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=704) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,412 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=704) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,413 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147465844 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,413 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2124, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=704, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,413 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2125, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=704, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE 
|| [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,413 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147465843 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,638 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:09,640 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:09,640 TRACE Oracle|vk_nau27|streaming Current time 1692961029640 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:09,640 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' 
[io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:09,641 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 2800. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:09,641 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599206 as end SCN. [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:09,641 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:09,641 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2123, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,641 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,641 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, 
groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,641 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=705) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,642 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147465615 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,642 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2126, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=705, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,642 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147465614 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,642 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599207' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:09,647 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:09,649 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599184, endScn=289931599206, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:09,649 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599185', endScn => '289931599206', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:09,657 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599184, 289931599206] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:09,715 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:09,715 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:09,715 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=133, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, 
memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,715 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=133, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,715 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:09,736 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:09,736 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599193, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:06Z, transactionId='0b002000250f2f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd03.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:09,736 TRACE Oracle|vk_nau27|streaming Transaction 0b002000250f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:09,736 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599196, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:06Z, transactionId='0d00100062d62900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd09.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:09,736 TRACE Oracle|vk_nau27|streaming Transaction 0d00100062d62900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:09,736 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599201, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:07Z, transactionId='05001d00ca6f2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd0f.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:09,737 TRACE Oracle|vk_nau27|streaming Transaction 05001d00ca6f2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:09,737 DEBUG Oracle|vk_nau27|streaming Counters{rows=7, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=3, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:09,737 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 2737. 
Offset SCN: 289931599184, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599201, txIds=[05001d00ca6f2b00]}}], Active Transactions: 0, Sleep: 2800 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:09,737 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:09,738 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:09,740 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:09,742 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:09,744 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:09,748 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=206, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,748 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,748 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,748 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,748 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=161) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,748 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,748 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,748 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 942 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,748 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=207, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=161, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,748 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,748 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 942 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,756 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2131, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,756 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,756 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=706) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,756 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379620 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,756 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2134, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=706, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,756 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
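The FETCH requests logged above all carry the same parameters (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, i.e. READ_UNCOMMITTED), which correspond to the standard consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. As a rough illustration only -- a minimal stand-alone sketch, not the Connect worker's internal consumer code -- a plain KafkaConsumer configured with the same values against the naument-config topic seen above would produce an equivalent fetch pattern:

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ConfigTopicPoller {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker addresses, group id and client id taken from the log lines above.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--configs");
        // Fetch parameters matching the logged FetchRequestData:
        // maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0 (READ_UNCOMMITTED).
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-config"));
            while (true) {
                // Each poll reuses the broker-side fetch session; unchanged partitions
                // travel as "implied" rather than being re-sent in the request body.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                records.forEach(r -> System.out.printf("partition=%d offset=%d%n", r.partition(), r.offset()));
            }
        }
    }
}

Once the broker has assigned a fetch session (for example sessionId=212244730 above), each subsequent poll sends an IncrementalFetchRequest that lists only changed partitions and marks the rest as "implied", which is why the requests above go out with empty topics=[] payloads.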
2023-08-25 13:57:09,756 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,756 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379620 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,773 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:09,773 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
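The io.debezium.connector.base.ChangeEventQueue pair just above, "checking for more records..." followed by "no records available or batch size not reached yet, sleeping a bit...", is emitted in a tight loop while the Oracle connector task drains an empty change-event queue: the poll side looks for buffered events, and when there is neither a record nor a full batch it waits briefly and checks again. The class below is only a rough sketch of that consumer-side pattern under assumed names and sizes; it is not Debezium's actual ChangeEventQueue implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Hypothetical illustration of the poll/sleep pattern behind the
// "checking for more records..." / "no records available ... sleeping a bit" pairs.
// Not Debezium's ChangeEventQueue; just the same idea: wait briefly on a bounded
// queue and hand back a batch once something has arrived.
public class PollingQueueSketch<T> {
    private final BlockingQueue<T> queue;
    private final int maxBatchSize;
    private final long pollIntervalMs;

    public PollingQueueSketch(int capacity, int maxBatchSize, long pollIntervalMs) {
        this.queue = new ArrayBlockingQueue<>(capacity);
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    // Producer side (in Debezium's case the streaming thread feeding change events).
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Consumer side, called repeatedly by the task's poll().
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        while (batch.isEmpty()) {
            // "checking for more records..."
            T first = queue.poll(pollIntervalMs, TimeUnit.MILLISECONDS);
            if (first == null) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                continue;
            }
            batch.add(first);
            queue.drainTo(batch, maxBatchSize - batch.size());
        }
        return batch;
    }

    public static void main(String[] args) throws InterruptedException {
        PollingQueueSketch<String> q = new PollingQueueSketch<>(1024, 100, 50);
        q.enqueue("event-1");
        q.enqueue("event-2");
        System.out.println(q.poll()); // prints [event-1, event-2]
    }
}

The producer side of the real queue is fed by the connector's streaming source, while WorkerSourceTask keeps calling the task's poll(), which is why the entries that follow alternate between these queue messages and "About to send 0 records to Kafka" / "Nothing to send to Kafka. Polling source for additional records".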
2023-08-25 13:57:09,774 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:09,774 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:09,774 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:09,774 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:09,774 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:09,846 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2132, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,846 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,846 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,846 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,846 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,846 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=708) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379530 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2135, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=708, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379529 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=716, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=710) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147378506 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,847 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=717, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=710, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,847 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147378506 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2133, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=709) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379512 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2136, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=709, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379512 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2124, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=705) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147465343 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2127, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=705, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147465343 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2125, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:09,913 
2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partitions naument-offset-15, -2 (broker1:29092, id: 1 rack: null) and naument-offset-23 (broker3:29092, id: 3 rack: null) because the previous requests to those brokers have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partitions naument-offset-21, -4 (broker1:29092, id: 1 rack: null) because the previous requests to that broker have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,913 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,913 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,914 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,914 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,914 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,914 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partitions naument-offset-13, -17, -8, -18, -1 (broker3:29092, id: 3 rack: null) because the previous requests to that broker have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,914 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=705) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:09,914 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,914 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,914 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147465343 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:09,914 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2128, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=705, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:09,914 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partitions naument-offset-9, -23 (broker3:29092, id: 3 rack: null) and naument-offset-7, -11, -15 (broker1:29092, id: 1 rack: null) because the previous requests to those brokers have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
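The FetchRequestData above shows the fetch tuning this worker consumer is running with: maxWaitMs=500, minBytes=1 and maxBytes=52428800 map to the standard consumer settings fetch.max.wait.ms, fetch.min.bytes and fetch.max.bytes, and isolationLevel=0 corresponds to read_uncommitted. A minimal Java sketch of a consumer configured with the same values follows; the bootstrap list and the assumption that "naument-offset" is the internal offsets topic are taken from the broker and partition names visible in this log, everything else is a placeholder.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetsTopicConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker addresses as they appear in the log; adjust for your environment.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        // Mirrors maxWaitMs=500, minBytes=1, maxBytes=52428800 from the FETCH request.
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
        // isolationLevel=0 in the request corresponds to read_uncommitted.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-offset"));
            // One poll corresponds to one "Polling for fetches ..." cycle in the log.
            consumer.poll(Duration.ofMillis(500));
        }
    }
}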
2023-08-25 13:57:09,914 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partitions naument-offset-2, -14, -21, -4, -19 (broker1:29092, id: 1 rack: null), naument-offset-12, -20, -24, -22, -3, -0, -6, -10, -16 (broker2:29092, id: 2 rack: null) and naument-offset-5, -13, -17, -8, -18, -1 (broker3:29092, id: 3 rack: null) because the previous requests to those brokers have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:09,914 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147465342 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:10,142 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2126, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:10,142 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:10,142 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,142 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,142 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partitions naument-offset-7, -11, -15 (broker1:29092, id: 1 rack: null) because the previous requests to that broker have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,142 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,142 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partitions naument-offset-2, -14, -21, -4 (broker1:29092, id: 1 rack: null) and naument-offset-12, -20, -24, -22, -3, -0, -6 (broker2:29092, id: 2 rack: null) because the previous requests to those brokers have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,142 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,142 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partitions naument-offset-19 (broker1:29092, id: 1 rack: null) and naument-offset-10, -16 (broker2:29092, id: 2 rack: null) because the previous requests to those brokers have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,142 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,142 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,142 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,142 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,143 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,143 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=706) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:10,143 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,143 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,143 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147465114 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:10,143 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2129, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=706, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:10,143 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partitions naument-offset-9 (broker3:29092, id: 3 rack: null) and naument-offset-7, -11, -15 (broker1:29092, id: 1 rack: null) because the previous requests to those brokers have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,143 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partitions naument-offset-2, -14, -21, -4, -19 (broker1:29092, id: 1 rack: null), naument-offset-12, -20, -24, -22, -3, -0, -6, -10, -16 (broker2:29092, id: 2 rack: null) and naument-offset-23, -5, -13, -17, -8, -18, -1 (broker3:29092, id: 3 rack: null) because the previous requests to those brokers have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,143 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147465113 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:10,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=207, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:10,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:10,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
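The sink consumer connector-consumer-vk_nau27_sink-0 is positioned at offset 2001 of vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 (see the next fetch request) and the broker keeps answering with empty incremental responses, which suggests the group is fully caught up. A quick way to confirm that independently of this TRACE output is to compare the group's committed offset with the partition's log-end offset; a sketch with the Kafka Admin client is below, with the bootstrap address as a placeholder and on the assumption that the group has already committed at least once.

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class SinkLagCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27", 0);
            // Offset committed by the sink connector's consumer group.
            OffsetAndMetadata committed = admin.listConsumerGroupOffsets("connect-vk_nau27_sink")
                    .partitionsToOffsetAndMetadata().get().get(tp);
            // Current log-end offset of the partition.
            ListOffsetsResult.ListOffsetsResultInfo end = admin
                    .listOffsets(Map.of(tp, OffsetSpec.latest())).all().get().get(tp);
            System.out.printf("committed=%d, end=%d, lag=%d%n",
                    committed.offset(), end.offset(), end.offset() - committed.offset());
        }
    }
}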
2023-08-25 13:57:10,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=162) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:10,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,249 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 441 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:10,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=208, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=162, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:10,249 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,249 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 441 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:10,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2134, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:10,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:10,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partitions naument-status-3, naument-status-2 (broker1:29092, id: 1 rack: null) because the previous requests to that broker have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=707) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:10,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379119 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:10,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2137, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=707, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:10,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
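Almost all of the volume in this capture comes from TRACE and DEBUG output of the Kafka client loggers inside the Connect worker (org.apache.kafka.clients.*), not from the connectors themselves. If the goal of the capture has been met, the verbosity can be lowered at runtime through Connect's dynamic logging endpoint (PUT /admin/loggers/<logger> on the worker's REST listener) without restarting the worker. A sketch using the JDK HTTP client follows; the host and port are placeholders, and the endpoint assumes a Connect version recent enough to expose /admin/loggers.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class QuietClientLogging {
    public static void main(String[] args) throws Exception {
        // Placeholder address for the Connect REST listener.
        String url = "http://localhost:8083/admin/loggers/org.apache.kafka.clients";
        HttpRequest request = HttpRequest.newBuilder(URI.create(url))
                .header("Content-Type", "application/json")
                // Raise the Kafka client loggers from TRACE/DEBUG back to INFO.
                .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}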
2023-08-25 13:57:10,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partitions naument-status-3, naument-status-2 (broker1:29092, id: 1 rack: null), naument-status-1 (broker3:29092, id: 3 rack: null) and naument-status-0 (broker2:29092, id: 2 rack: null) because the previous requests to those brokers have not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379119 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
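The "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." pair that fills the remainder of this capture is the Debezium ChangeEventQueue poll loop of the vk_nau27 source task idling on an empty queue. How often it wakes and how much it hands to Connect per poll is governed by the connector's poll.interval.ms, max.batch.size and max.queue.size settings. The values below are purely illustrative, expressed as a Java map for the sketch; the actual settings of this deployment are not visible in the log.

import java.util.Map;

public class Vk27QueueTuningSketch {
    public static void main(String[] args) {
        // Hypothetical queue-related settings for the vk_nau27 source connector.
        Map<String, String> queueTuning = Map.of(
                "poll.interval.ms", "1000",  // how long the task sleeps between empty polls
                "max.batch.size", "2048",    // records handed to Connect per poll
                "max.queue.size", "8192");   // upper bound of the in-memory change event queue
        queueTuning.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}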
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,274 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,275 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,275 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:10,275 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:10,275 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:10,275 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
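The repeating DEBUG pair above is the idle loop of the Debezium source task: the Connect worker keeps polling the connector's in-memory change event queue while the Oracle side has produced nothing new, so each pass logs "checking for more records..." followed by "no records available or batch size not reached yet, sleeping a bit...", and the worker ends up with an empty batch ("About to send 0 records to Kafka"). A minimal sketch of that pattern, assuming a plain java.util.concurrent.BlockingQueue stand-in rather than Debezium's actual ChangeEventQueue (IdlePollSketch and its poll() method are invented names for illustration):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Illustrative only: a timed drain loop that produces the same kind of
// "checking ... / no records available ... sleeping a bit" chatter whenever
// the producing side (LogMiner in the real connector) has nothing to offer.
public class IdlePollSketch {
    private static final BlockingQueue<String> queue = new LinkedBlockingQueue<>(8192);

    // Roughly what a source task's poll() does: wait briefly, return whatever
    // arrived, possibly an empty batch (hence "About to send 0 records to Kafka").
    static List<String> poll(long timeoutMs) throws InterruptedException {
        System.out.println("DEBUG checking for more records...");
        List<String> batch = new ArrayList<>();
        String first = queue.poll(timeoutMs, TimeUnit.MILLISECONDS);
        if (first == null) {
            System.out.println("DEBUG no records available or batch size not reached yet, sleeping a bit...");
            return batch;                 // empty batch -> the worker simply polls again
        }
        batch.add(first);
        queue.drainTo(batch);             // take whatever else is already buffered
        return batch;
    }

    public static void main(String[] args) throws InterruptedException {
        for (int i = 0; i < 5; i++) {     // the worker runs this loop indefinitely
            List<String> records = poll(100);
            System.out.println("TRACE About to send " + records.size() + " records to Kafka");
        }
    }
}

Because this loop is logged on every iteration at DEBUG/TRACE level, an idle connector emits these lines continuously; they indicate waiting, not an error.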
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2135, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=709) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
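The remaining DEBUG/TRACE traffic comes from the Connect worker's internal consumers (clientId=naument--statuses, naument--configs, naument--offsets) tailing the naument-status, naument-config and naument-offset topics. The FetchSessionHandler lines describe incremental fetch sessions: an empty response=() with an implied=(...) partition list means the broker had nothing new for those partitions, and "Skipping fetch ... has not been processed" only means a fetch to that broker is still in flight. A stand-alone consumer producing the same kind of fetch cycle against the status topic could look like the sketch below; the class name, group id and auto.offset.reset choice are assumptions, while the broker addresses, topic name and read_uncommitted isolation are taken from the log:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Illustrative stand-alone consumer for the Connect status topic seen in the log
// ("naument-status"); it is not the worker's own reader, just a sketch of the
// public consumer API that drives the FETCH requests logged above.
public class StatusTopicTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-status-tail");      // hypothetical group, not the worker's
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");  // matches READ_UNCOMMITTED in the log
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("naument-status"));
            while (true) {
                // Each poll() drives one FETCH request/response cycle like the ones logged above;
                // an empty result corresponds to the empty "responses=[]" fetch responses.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> r : records) {
                    System.out.printf("%s-%d@%d key=%s%n", r.topic(), r.partition(), r.offset(),
                            r.key() == null ? "null" : new String(r.key()));
                }
            }
        }
    }
}

Like the Debezium polling above, all of this is routine background chatter at DEBUG/TRACE level rather than a sign of a fault.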
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379029 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2138, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=709, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:10,347 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:10,347 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379029 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=717, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,347 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,348 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,348 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=711) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,348 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,348 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,348 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147378006 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,348 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=718, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=711, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,348 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,348 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147378005 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2136, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=710) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379011 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2139, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=710, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147379011 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2127, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=706) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,414 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464842 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,414 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2130, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=706, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464841 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2128, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=706) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464841 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,415 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2131, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=706, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,415 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464841 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2129, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
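Note: the AbstractFetch/FetchSessionHandler entries above trace the worker consumer's incremental fetch sessions against the Connect offset-storage topic: at most one in-flight request per broker (hence the repeated "Skipping fetch ... has not been processed" lines), plus a FetchPosition (offset, leader epoch, current leader) per partition. The sketch below reads comparable metadata through the public consumer API; it assumes the topic is named naument-offset (inferred from the partition names naument-offset-0..24) and that the broker addresses seen in the log are reachable.

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;

// Lists partition leaders and end offsets for the (assumed) offset-storage topic
// "naument-offset", mirroring the FetchPosition/leader information logged above.
public class OffsetTopicPositions {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // no group.id, read-only inspection

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            List<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo pi : consumer.partitionsFor("naument-offset")) {
                partitions.add(new TopicPartition(pi.topic(), pi.partition()));
                System.out.println("partition " + pi.partition() + " leader " + pi.leader());
            }
            // End offsets roughly correspond to the positions the worker's naument--offsets
            // consumer reads up to (e.g. naument-offset-13 at offset 25 above).
            Map<TopicPartition, Long> end = consumer.endOffsets(partitions);
            end.forEach((tp, offset) -> System.out.println(tp + " end offset " + offset));
        }
    }
}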
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=707) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464613 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,643 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2132, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=707, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,643 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464613 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,691 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:10,691 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:10,691 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=209, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,691 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition 
vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,691 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,691 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=209, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,691 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:10,691 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,691 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,691 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,691 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,749 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=208, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,749 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,749 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,749 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
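Note: the HEARTBEAT request/response pair above is the sink task's consumer (group connect-vk_nau27_sink) keeping its group membership alive while it waits at position 2001 of vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0. A minimal way to check that group's committed offsets from the outside, without joining the group and triggering a rebalance, is the Admin API sketch below (bootstrap address taken from the log; adjust as needed).

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

// Prints the committed offsets of the sink connector's consumer group.
public class SinkGroupOffsets {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092");
        try (Admin admin = Admin.create(props)) {
            Map<TopicPartition, OffsetAndMetadata> offsets = admin
                    .listConsumerGroupOffsets("connect-vk_nau27_sink") // groupId from the log
                    .partitionsToOffsetAndMetadata()
                    .get();
            // Expect vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at or near offset 2001,
            // the FetchPosition reported above.
            offsets.forEach((tp, om) -> System.out.println(tp + " -> " + om.offset()));
        }
    }
}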
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,749 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=163) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,749 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,749 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,749 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2942 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,749 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=210, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=163, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,749 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,749 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2942 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2137, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: 
null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=708) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378619 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2140, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=708, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378619 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
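Note: the ChangeEventQueue DEBUG pair that starts here ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") is the Debezium task thread polling a bounded in-memory queue that the Oracle streaming thread fills. The snippet below is only a simplified sketch of that handoff pattern, not Debezium's actual ChangeEventQueue; the queue, batch and poll values loosely mirror its max.queue.size, max.batch.size and poll.interval.ms settings and are illustrative assumptions.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Simplified illustration of a bounded producer/consumer handoff with timed polling,
// the pattern behind the repeated "checking for more records" / "sleeping a bit" lines.
public class PollingQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(8192); // roughly max.queue.size
        int maxBatchSize = 2048;                                      // roughly max.batch.size

        List<String> batch = new ArrayList<>();
        long deadline = System.currentTimeMillis() + 2_000;           // bounded demo run
        while (System.currentTimeMillis() < deadline && batch.size() < maxBatchSize) {
            System.out.println("checking for more records...");
            String event = queue.poll(500, TimeUnit.MILLISECONDS);    // roughly poll.interval.ms
            if (event == null) {
                // Nothing arrived from the streaming side within the poll interval.
                System.out.println("no records available or batch size not reached yet, sleeping a bit...");
                continue;
            }
            batch.add(event);
        }
        System.out.println("polled " + batch.size() + " records");
    }
}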
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,775 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,776 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:10,776 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:10,776 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... 
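Note: the AbstractWorkerSourceTask lines above ("About to send 0 records to Kafka", "Nothing to send to Kafka. Polling source for additional records") are the Connect worker's poll-then-send cycle around SourceTask.poll(), which here returns nothing because the Debezium queue is still empty. The sketch below imitates that cycle with a dummy task; it is a simplified stand-in, not the worker's or Debezium's actual code.

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

import java.util.List;
import java.util.Map;

// Simplified sketch of the worker's poll-then-send loop; DummyTask stands in for the
// real Debezium Oracle task and always reports an empty batch.
public class SourceLoopSketch {

    static class DummyTask extends SourceTask {
        @Override public String version() { return "sketch"; }
        @Override public void start(Map<String, String> props) { }
        @Override public List<SourceRecord> poll() throws InterruptedException {
            Thread.sleep(500);   // pretend the change event queue stayed empty
            return List.of();    // leads to "About to send 0 records to Kafka"
        }
        @Override public void stop() { }
    }

    public static void main(String[] args) throws InterruptedException {
        SourceTask task = new DummyTask();
        task.start(Map.of());
        for (int i = 0; i < 5; i++) {   // the real worker loops until the task is stopped
            List<SourceRecord> records = task.poll();
            if (records == null || records.isEmpty()) {
                System.out.println("Nothing to send to Kafka. Polling source for additional records");
                continue;               // immediately poll the source again
            }
            System.out.println("About to send " + records.size() + " records to Kafka");
            // the real worker converts the records and hands them to a KafkaProducer here
        }
        task.stop();
    }
}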
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,776 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=718, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=712) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147377505 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=719, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=712, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2138, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,848 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition 
naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147377505 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=710) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,848 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378528 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,848 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2141, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=710, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,849 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378527 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2139, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=711) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378511 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2142, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=711, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378511 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2130, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
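
The FetchRequestData values that recur in the requests above (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, i.e. READ_UNCOMMITTED) are the standard Kafka consumer defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. The sketch below spells those settings out explicitly on a consumer so the mapping is visible; the client id and group are copied from the log purely for illustration, and the class name is made up.

    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class FetchSettingsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--configs");        // as seen in the log
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);               // maxWaitMs=500
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                   // minBytes=1
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);            // maxBytes=52428800
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");  // isolationLevel=0
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                // a broker holds each fetch for at most 500 ms when fewer than fetch.min.bytes are available
            }
        }
    }

With these values, a broker simply parks the fetch for up to half a second when there is nothing to return, which is why the responses above keep coming back roughly twice per second with responses=[].
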
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=707) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464341 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2133, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=707, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464341 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2131, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
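
The long runs of "Skipping fetch for partition ... because previous request to brokerN ... has not been processed" are expected: the consumer keeps at most one in-flight fetch per broker, so while a fetch to broker1, broker2 or broker3 is outstanding, every partition whose leader is that broker is skipped and picked up on a later pass. The 25 naument-offset partitions are simply spread across the three brokers. The sketch below prints that partition-to-leader layout; topic name and bootstrap servers are taken from the log, everything else (class name, counting logic) is illustrative.

    import java.util.List;
    import java.util.Map;
    import java.util.Properties;
    import java.util.TreeMap;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.PartitionInfo;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class LeaderLayoutSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                List<PartitionInfo> partitions = consumer.partitionsFor("naument-offset");
                Map<Integer, Integer> perLeader = new TreeMap<>();  // leader broker id -> partition count
                for (PartitionInfo p : partitions) {
                    perLeader.merge(p.leader().id(), 1, Integer::sum);
                }
                // something like {1=8, 2=9, 3=8} would match the skip pattern traced above
                System.out.println(perLeader);
            }
        }
    }
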
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,915 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=707) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:10,916 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464341 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:10,916 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2134, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=707, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:10,916 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464340 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2132, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
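
The fetch positions logged for the naument-offset partitions are mostly 0, with small values such as 2, 10, 13 or 25 on a few partitions, presumably because only the partitions that a connector's offset key hashes to actually carry records. If you need to see which source offsets are stored in that topic, a plain consumer over it is enough; assuming the keys and values are the usual JSON written by Connect's internal converter, printing them as strings is readable. The sketch below does that; topic name and brokers come from the log, the class name and single 5-second poll are illustrative (loop until the positions reach the end offsets for a complete dump).

    import java.time.Duration;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class OffsetsTopicDump {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                List<TopicPartition> tps = new ArrayList<>();
                consumer.partitionsFor("naument-offset")
                        .forEach(p -> tps.add(new TopicPartition(p.topic(), p.partition())));
                consumer.assign(tps);
                consumer.seekToBeginning(tps);
                for (ConsumerRecord<String, String> r : consumer.poll(Duration.ofSeconds(5))) {
                    // key is typically ["<connector>", {...source partition...}], value the stored offset map
                    System.out.printf("%s-%d@%d  %s = %s%n", r.topic(), r.partition(), r.offset(), r.key(), r.value());
                }
            }
        }
    }
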
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=708) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464112 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,144 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2135, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=708, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
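
Every FETCH response in this stretch comes back with responses=[], i.e. empty: the naument--configs, naument--statuses and naument--offsets consumers are already sitting at the end of naument-config, naument-status and naument-offset, so each cycle is just a long-poll per broker with nothing new to return. One way to confirm that the logged FetchPositions really are the log-end offsets is to ask for the end offsets directly, as in the sketch below; topic names and brokers are taken from the log, the class name is made up.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class EndOffsetsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                List<TopicPartition> tps = new ArrayList<>();
                for (String topic : new String[] {"naument-config", "naument-status", "naument-offset"}) {
                    consumer.partitionsFor(topic)
                            .forEach(p -> tps.add(new TopicPartition(p.topic(), p.partition())));
                }
                Map<TopicPartition, Long> end = consumer.endOffsets(tps);
                // e.g. naument-offset-18 -> 122 would line up with the FetchPosition logged above
                end.forEach((tp, off) -> System.out.println(tp + " -> " + off));
            }
        }
    }
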
2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,144 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,145 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,145 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,145 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,145 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147464112 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=210, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=164) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,249 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2442 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,249 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=211, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=164, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,249 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,249 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2442 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2140, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=709) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378119 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,257 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2143, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=709, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,257 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378119 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,276 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:11,276 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
[... the preceding two ChangeEventQueue messages repeat verbatim dozens of times at 13:57:11,276 ...]
2023-08-25 13:57:11,277 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:11,277 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:11,277 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:11,277 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:11,277 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
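The ChangeEventQueue / AbstractWorkerSourceTask lines show the hand-off inside the Debezium source task: the streaming thread enqueues change events into a bounded queue, and the Connect worker's task thread polls that queue, getting nothing back ("About to send 0 records to Kafka") until either a batch fills up or a timeout passes. The sketch below is not Debezium's actual ChangeEventQueue, only a simplified analogue of the behaviour the "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." messages describe; the class and parameter names are invented.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

/** Simplified analogue (not Debezium's implementation) of a bounded change-event queue. */
public class SimpleChangeEventQueue<T> {
    private final BlockingQueue<T> queue;
    private final int maxBatchSize;
    private final long pollIntervalMs;

    public SimpleChangeEventQueue(int maxQueueSize, int maxBatchSize, long pollIntervalMs) {
        this.queue = new ArrayBlockingQueue<>(maxQueueSize);
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    /** Called by the streaming thread; blocks when the queue is full (back-pressure). */
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    /**
     * Called by the worker task thread. Returns once a full batch is buffered, or
     * whatever has accumulated when the timeout elapses -- possibly an empty list,
     * which is what produces the "About to send 0 records to Kafka" line above.
     */
    public List<T> poll(long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (queue.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
            // "no records available or batch size not reached yet, sleeping a bit..."
            TimeUnit.MILLISECONDS.sleep(pollIntervalMs);
        }
        List<T> batch = new ArrayList<>(maxBatchSize);
        queue.drainTo(batch, maxBatchSize);
        return batch;
    }
}
```

In Debezium itself the corresponding knobs are the connector's max.queue.size, max.batch.size and poll.interval.ms options, which control how long this idle "sleeping a bit" loop spins between non-empty batches.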
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=719, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=713) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147377004 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=720, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=713, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2141, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,349 TRACE 
|| [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147377004 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=711) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378027 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,349 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2144, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=711, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,349 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378027 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2142, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=712) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378010 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2145, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=712, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147378010 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2133, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
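For orientation: the naument--offsets, naument--statuses and naument--configs consumers above (all in group naument) are Kafka Connect's internal clients for the offset, status and config storage topics, which in this log are named naument-offset (partitions 0-24), naument-status (partitions 0-4) and naument-config (a single partition). The snippet below is a hedged reconstruction of the distributed-worker settings those names imply; only the topic names, group id and broker addresses are grounded in the log, the converters and everything else are assumptions.

```java
import java.util.Properties;

/** Hypothetical reconstruction of the Connect worker settings implied by the log; not taken from an actual config file. */
public class WorkerConfigSketch {
    public static void main(String[] args) {
        Properties worker = new Properties();
        worker.put("group.id", "naument");                                            // groupId seen in the consumer logs
        worker.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092"); // brokers seen in the fetch logs
        worker.put("offset.storage.topic", "naument-offset");   // naument-offset-0..24 above
        worker.put("status.storage.topic", "naument-status");   // naument-status-0..4 above
        worker.put("config.storage.topic", "naument-config");   // naument-config-0 above
        worker.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");   // assumption
        worker.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); // assumption
        worker.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```

The 25/5/1 partition counts match the documented Connect defaults for offset.storage.partitions and status.storage.partitions (the config topic is always a single partition), which is consistent with an untuned distributed worker.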
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=708) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,416 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463840 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,416 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2136, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=708, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463839 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2134, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=708) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463839 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,417 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2137, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=708, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,417 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463839 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2135, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=709) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463611 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,645 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2138, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=709, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
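Note on the FETCH parameters repeated in every request above: maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED) are simply the consumer defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. Below is a minimal Java sketch of a consumer that would issue FETCH requests with the same parameters; the bootstrap address, subscribed topic and deserializers are placeholders, not taken from this deployment.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class FetchConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092");      // placeholder broker address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");                     // group id seen in the log
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");                // -> maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                    // -> minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");             // -> maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");     // -> isolationLevel=0
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-offset"));                        // internal offsets topic seen above
            consumer.poll(Duration.ofMillis(500));                                // one poll; fetching proceeds as logged
        }
    }
}

The repeated "Skipping fetch for partition ... because previous request to ... has not been processed" TRACE lines are expected with this behavior: the fetcher keeps at most one in-flight FETCH per broker, so partitions led by a node with a pending request are simply revisited on the next poll.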
2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,645 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,646 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463611 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,750 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=211, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,750 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,750 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,750 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,750 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=165) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,750 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,750 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,750 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1941 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,750 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=212, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=165, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,750 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,750 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1941 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2143, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=710) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377619 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,757 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2146, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=710, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,757 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377619 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,777 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:11,777 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:11,778 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:11,778 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:11,778 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:11,778 DEBUG Oracle|vk_nau27|oracle-connector-task polling records...
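The ChangeEventQueue and WorkerSourceTask entries above are the normal idle loop of a source connector: the framework keeps calling the task's poll(), and while no change events are buffered the task hands back nothing, so the worker logs that it has 0 records to send and polls the source again. A minimal sketch of that contract, assuming a hypothetical queue-backed task (this is not Debezium's actual implementation):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

// Illustrative only: shows how a SourceTask signals "nothing to send", which is what
// makes the worker log "Nothing to send to Kafka. Polling source for additional records".
public abstract class QueueBackedSourceTask extends SourceTask {

    private final BlockingQueue<SourceRecord> queue = new LinkedBlockingQueue<>();

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        // Wait briefly for a first record; returning null is allowed and simply
        // makes the worker call poll() again, as in the TRACE output above.
        SourceRecord first = queue.poll(100, TimeUnit.MILLISECONDS);
        if (first == null) {
            return null;
        }
        List<SourceRecord> batch = new ArrayList<>();
        batch.add(first);
        queue.drainTo(batch);   // hand over whatever else is already buffered
        return batch;
    }
}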
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:11,778 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=720, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=714) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,849 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147376504 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=721, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=714, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2144, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch 
response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,849 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,849 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147376504 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=712) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377527 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,850 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2147, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=712, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,850 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377526 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,866 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2145, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,866 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,866 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,866 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,866 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,866 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,866 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,866 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,866 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=713) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,866 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,866 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,866 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377510 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,866 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2148, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=713, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,866 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,866 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,866 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377510 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:57:11,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2136, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) 
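The SignalProcessor and SourceSignalChannel entries show the connector's periodic, currently empty check of its signaling channel. For reference, a signal reaches the source channel by inserting a row into the table configured as signal.data.collection; the sketch below assumes a placeholder signal table name and JDBC URL, and uses the execute-snapshot signal type with the captured table seen earlier in this log.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

// Illustrative sketch: request an incremental snapshot through the source signal channel.
// "DEBEZIUM.SIGNAL_TBL" and the JDBC URL are placeholders, not taken from this connector's config.
public class SendSnapshotSignal {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:oracle:thin:@//oracle-host:1521/ORCLPDB", "user", "password");
             PreparedStatement ps = conn.prepareStatement(
                "INSERT INTO DEBEZIUM.SIGNAL_TBL (id, type, data) VALUES (?, ?, ?)")) {
            ps.setString(1, "adhoc-snapshot-1");                     // arbitrary unique signal id
            ps.setString(2, "execute-snapshot");                     // signal type
            ps.setString(3, "{\"data-collections\": [\"DEBEZIUM.GBC_TBL_SERVICECALL_NC27\"], \"type\": \"incremental\"}");
            ps.executeUpdate();                                      // auto-commit assumed
        }
    }
}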
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
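The repeated "Skipping fetch for partition ... because previous request ... has not been processed" TRACE lines reflect the client keeping at most one in-flight fetch per broker: partitions whose leader already has an outstanding request are simply left out of the next request. A toy model of that bookkeeping (not the actual Kafka client code) is sketched below.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Toy model of "one in-flight fetch per node": partitions led by a node with a
// pending request are skipped, exactly as the TRACE lines above report.
public class FetchScheduler {
    private final Set<String> nodesWithPendingRequest = new HashSet<>();

    /** Groups fetchable partitions by leader node, skipping nodes that are still busy. */
    public Map<String, List<String>> prepareFetches(Map<String, String> partitionToLeader) {
        Map<String, List<String>> byNode = new HashMap<>();
        partitionToLeader.forEach((partition, node) -> {
            if (nodesWithPendingRequest.contains(node)) {
                System.out.printf("Skipping fetch for partition %s because previous request to %s has not been processed%n",
                        partition, node);
                return;
            }
            byNode.computeIfAbsent(node, n -> new ArrayList<>()).add(partition);
        });
        byNode.keySet().forEach(nodesWithPendingRequest::add);   // mark nodes busy until a response arrives
        return byNode;
    }

    /** Called when a FETCH response (or failure) for the node comes back. */
    public void onResponse(String node) {
        nodesWithPendingRequest.remove(node);
    }
}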
2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch 
for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=709) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463339 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2139, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=709, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 
3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463339 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header 
RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2137, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,917 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,917 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 
(id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to 
node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=709) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463339 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:11,918 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2140, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=709, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:11,918 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463338 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2138, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=710) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,146 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463110 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,146 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2141, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=710, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
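Aside: the TRACE chatter above appears to be the consumer's normal one-in-flight-fetch-per-broker behaviour. For each broker the client builds one incremental fetch ("Built incremental fetch (sessionId=..., epoch=...)" followed by "Adding pending request for node ..."), and while that request is outstanding every partition led by the same broker is logged as "Skipping fetch ... because previous request ... has not been processed"; the pending request is cleared again when the FETCH response arrives ("Removing pending request for node ..."). To gauge how much of the log this accounts for, a minimal sketch like the following can tally the skip and pending-request lines. The file name kafka-connect.log and the regular expressions are illustrative assumptions, not taken from the log itself.

    import re
    from collections import Counter

    # Minimal sketch (illustrative): summarize the fetch chatter shown above.
    # Assumption: the log has been saved as "kafka-connect.log"; records may be
    # wrapped across physical lines, so the whole file is scanned as one string.

    SKIP_RE = re.compile(
        r"Skipping fetch for partition\s+(\S+)\s+because previous request "
        r"to\s+(\S+)\s*\(id:\s*(\d+)"
    )
    PENDING_RE = re.compile(r"(Adding|Removing) pending request for node\s+(\S+)")

    with open("kafka-connect.log", encoding="utf-8") as f:
        text = f.read()

    # Count skipped fetches per broker (a fetch to that broker was already in flight).
    skips_per_broker = Counter()
    for _partition, broker, node_id in SKIP_RE.findall(text):
        skips_per_broker[f"{broker} (id {node_id})"] += 1

    # Count how often a pending request was added/removed per broker.
    pending_events = Counter()
    for action, broker in PENDING_RE.findall(text):
        pending_events[f"{action} -> {broker}"] += 1

    print("Skipped fetches per broker (incremental fetch already in flight):")
    for broker, count in skips_per_broker.most_common():
        print(f"  {broker}: {count}")

    print("Pending-request transitions:")
    for event, count in pending_events.most_common():
        print(f"  {event}: {count}")

Reading the whole file as a single string keeps the counts tolerant of records that were wrapped across physical lines, as they are in this dump.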
2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,147 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147463109 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=212, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=166) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,251 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1440 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=213, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=166, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,251 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,251 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1440 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2146, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=711) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377118 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2149, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=711, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377118 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,278 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,279 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:12,279 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:12,279 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,279 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=721, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2147, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=715) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147376003 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=722, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=715, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
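The FetchSessionHandler and AbstractFetch lines above are ordinary Kafka consumer internals: the worker's internal consumers for the naument-config and naument-status topics each keep one incremental fetch session per broker (the sessionId/epoch pairs in the log) and skip partitions whose previous FETCH request is still in flight. The sketch below is an illustrative stand-alone consumer, not the Connect worker's own code; the bootstrap server list is assumed from the broker hostnames in the log, the group id is hypothetical, and polling such a consumer would produce the same kind of incremental FETCH / IncrementalFetchRequest traffic shown here.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class StatusTopicTail {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Broker addresses taken from the log above (assumed reachable from this host).
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-log-inspection");   // hypothetical group, not the worker's
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");  // matches READ_UNCOMMITTED in the log
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-status"));
                while (true) {
                    // Each poll() drives the incremental FETCH requests (sessionId/epoch bookkeeping)
                    // that the FetchSessionHandler DEBUG lines above report; the client handles it transparently.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    records.forEach(r -> System.out.printf("%s-%d@%d: %d bytes%n",
                            r.topic(), r.partition(), r.offset(), r.value() == null ? 0 : r.value().length));
                }
            }
        }
    }
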
2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=713) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377026 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2150, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=713, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147376003 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,350 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with 
timeout 2147377026 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2148, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=714) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377010 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2151, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=714, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147377010 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2140, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), 
implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2139, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 
(id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position 
FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,418 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
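The naument-offset-* fetch positions above belong to the worker's offset-storage consumer. To see what the source connector has actually committed (for example the redo-log SCN that the streaming lines further down keep advancing), the internal offsets topic can be read directly. This is a minimal hedged sketch, assuming the topic name naument-offset implied by the partition names in the log and the worker's usual JSON serialization of internal topics; the group id is hypothetical and a single short poll is used only for brevity.

    import java.nio.charset.StandardCharsets;
    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class OffsetsTopicDump {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-offset-dump");   // hypothetical read-only group
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");     // read the topic from the beginning
            props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-offset"));
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
                // Keys and values are JSON written by the worker; exact field names depend on the
                // connector version, so the printed strings are inspected as-is rather than parsed here.
                records.forEach(r -> System.out.printf("%s -> %s%n",
                        r.key() == null ? "null" : new String(r.key(), StandardCharsets.UTF_8),
                        r.value() == null ? "null" : new String(r.value(), StandardCharsets.UTF_8)));
            }
        }
    }
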
2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=710) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,418 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=710) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,419 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147462838 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,419 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2142, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=710, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,419 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2143, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=710, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, 
groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,419 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147462837 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,544 TRACE 
Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:57:12,545 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics]
2023-08-25 13:57:12,545 TRACE Oracle|vk_nau27|streaming Current time 1692961032545 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics]
2023-08-25 13:57:12,545 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:57:12,547 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 3000. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics]
2023-08-25 13:57:12,547 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599229 as end SCN. [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource]
2023-08-25 13:57:12,547 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:57:12,548 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599230' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:57:12,552 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:57:12,554 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599201, endScn=289931599229, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource]
2023-08-25 13:57:12,554 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599202', endScn => '289931599229', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:57:12,562 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599201, 289931599229] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599208, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:09Z, transactionId='03001b00f8b42800', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd12.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Transaction 03001b00f8b42800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599211, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:09Z, transactionId='10000200ce382a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd19.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Transaction 10000200ce382a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599214, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:09Z, transactionId='13000d00f0b72b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd1d.0114, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Transaction 13000d00f0b72b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599217, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:09Z, transactionId='1400020071af2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd20.00e4, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Transaction 1400020071af2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931599219) LogMinerEventRow{scn=289931599220, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:09Z, transactionId='0e0006008e492900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd49.0084, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Transaction 0e0006008e492900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599222, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:09Z, transactionId='0e000b0061462900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd56.00dc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming Transaction 0e000b0061462900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 DEBUG Oracle|vk_nau27|streaming Counters{rows=13, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=6, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor]
2023-08-25 13:57:12,647 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 3647.
Offset SCN: 289931599201, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599222, txIds=[0e000b0061462900]}}], Active Transactions: 0, Sleep: 3000 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2141, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=711) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147462609 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,647 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2144, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=711, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,647 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:12,648 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147462609 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:12,650 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:57:12,651 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:57:12,653 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection]
2023-08-25 13:57:12,655 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource]
2023-08-25 13:57:12,716 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat]
2023-08-25 13:57:12,716 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator]
2023-08-25 13:57:12,716 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=134, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:12,716 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node
2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=134, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,716 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:12,751 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=213, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,751 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,751 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,751 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,751 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=167) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,751 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,751 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,751 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 940 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,751 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=214, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=167, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,751 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,751 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 940 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,758 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2149, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,758 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,758 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=712) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,758 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376618 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,758 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2152, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=712, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,758 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
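The fetch chatter above is the Kafka client's standard incremental-fetch-session cycle: each of Connect's internal consumers (naument--offsets, naument--configs, naument--statuses) and the sink task's consumer keeps at most one in-flight FETCH per broker, logs "Skipping fetch ... has not been processed" for partitions whose leader already has a pending request, and sends READ_UNCOMMITTED fetches with maxWaitMs=500 and minBytes=1. The sketch below is a minimal standalone consumer configured the same way against the sink topic seen in this log; the class name, the debug group id and the byte-array deserializers are illustrative assumptions, and Kafka Connect builds its own consumers internally rather than running code like this.

```java
// Minimal sketch, not Connect code: a standalone consumer tuned like the
// connector-consumer-vk_nau27_sink-0 client visible in the log above.
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class SinkTopicTail {                                          // hypothetical class name
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092"); // internal listeners from the log
        props.put("group.id", "vk_nau27_sink_debug");                 // hypothetical group, not the real sink group
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("isolation.level", "read_uncommitted");             // READ_UNCOMMITTED, as in the FETCH requests above
        props.put("fetch.max.wait.ms", "500");                        // maxWaitMs=500 in the logged FetchRequestData
        props.put("fetch.min.bytes", "1");                            // minBytes=1 in the logged FetchRequestData
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
            while (true) {
                // Each poll drives the same fetch/skip cycle the TRACE lines show.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(940));
                records.forEach(r -> System.out.printf("partition=%d offset=%d%n", r.partition(), r.offset()));
            }
        }
    }
}
```

The repeated "Skipping fetch for partition ... has not been processed" TRACE lines are therefore expected at this log level: the client simply declines to pipeline a second fetch to a broker whose previous request in the session has not yet returned.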
2023-08-25 13:57:12,758 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,758 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376618 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,779 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,780 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,780 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:12,780 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:12,780 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,780 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=722, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2150, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=716) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147375502 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=723, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=716, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=714) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376525 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,851 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2153, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=714, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147375502 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,851 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with 
timeout 2147376525 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2151, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=715) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376509 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2154, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=715, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376509 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2143, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), 
implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2142, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 
(id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position 
FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
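
The fetch positions reported above for the naument-offset-* partitions can be cross-checked from outside the worker. A minimal sketch, assuming the kafka-python client is available and that the broker addresses advertised in the log (broker1:29092, broker2:29092, broker3:29092) are reachable from wherever the script runs:

from kafka import KafkaConsumer, TopicPartition

# Broker list copied from the log; adjust if the advertised listeners differ outside the cluster.
consumer = KafkaConsumer(
    bootstrap_servers=["broker1:29092", "broker2:29092", "broker3:29092"],
    enable_auto_commit=False,
)
partitions = consumer.partitions_for_topic("naument-offset") or set()
tps = [TopicPartition("naument-offset", p) for p in sorted(partitions)]
for tp, end in consumer.end_offsets(tps).items():
    # Compare with the FetchPosition{offset=...} values the worker's consumer logs per partition.
    print(f"{tp.topic}-{tp.partition}: log end offset {end}")
consumer.close()
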
2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=711) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=711) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,919 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147462337 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2145, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=711, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,919 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2146, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=711, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:12,919 TRACE || [Consumer clientId=naument--offsets, 
groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:12,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147462337 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,148 DEBUG || [Consumer 
clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2144, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, 
offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,148 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=712) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,149 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147462108 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,149 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2147, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=712, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,149 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147462107 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=214, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=168) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,251 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 440 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,251 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=215, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=168, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,251 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,251 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 440 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2152, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=713) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376118 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,258 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2155, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=713, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
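Note on the fetch bookkeeping visible in the surrounding TRACE/DEBUG lines: each consumer keeps at most one in-flight FETCH request per broker, so any partition whose leader already has an outstanding request is reported as "Skipping fetch ... because previous request ... has not been processed", and each broker connection carries an incremental fetch session whose epoch is bumped on every request ("Built incremental fetch (sessionId=..., epoch=N)"). The sketch below is a minimal, hypothetical model of that bookkeeping for illustration only; it is not Kafka's AbstractFetch/FetchSessionHandler code, and every class, field and method name in it is invented.

// Illustrative model only (hypothetical names): per-node in-flight guard plus a session epoch counter,
// mirroring the "Skipping fetch ...", "Built incremental fetch ..." and "Removing pending request ..." lines above.
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class FetchSessionModel {
    private final Set<Integer> nodesWithPendingFetch = new HashSet<>();      // at most one in-flight FETCH per broker
    private final Map<Integer, Integer> sessionEpochByNode = new HashMap<>(); // incremental fetch session epoch per node

    /** Returns true if a fetch may be sent to this node, i.e. no FETCH request is currently in flight. */
    boolean maySendFetch(int nodeId, String partition) {
        if (nodesWithPendingFetch.contains(nodeId)) {
            System.out.printf("Skipping fetch for partition %s because previous request to node %d has not been processed%n",
                    partition, nodeId);
            return false;
        }
        return true;
    }

    /** Marks a fetch as sent and bumps the session epoch, as in "Built incremental fetch ... epoch=N". */
    void fetchSent(int nodeId) {
        nodesWithPendingFetch.add(nodeId);
        int epoch = sessionEpochByNode.merge(nodeId, 1, Integer::sum);
        System.out.printf("Built incremental fetch (epoch=%d) for node %d%n", epoch, nodeId);
    }

    /** Called when the FETCH response arrives, as in "Removing pending request for node ...". */
    void responseReceived(int nodeId) {
        nodesWithPendingFetch.remove(nodeId);
    }

    public static void main(String[] args) {
        FetchSessionModel model = new FetchSessionModel();
        if (model.maySendFetch(3, "naument-status-4")) {
            model.fetchSent(3);
        }
        model.maySendFetch(3, "naument-status-1"); // skipped: request to node 3 still in flight
        model.responseReceived(3);                 // response handled, node 3 is free again
        if (model.maySendFetch(3, "naument-status-1")) {
            model.fetchSent(3);
        }
    }
}

Running this prints one skip line while a request is in flight and a new epoch once the response has been handled, which is the same pattern the naument--offsets and naument--statuses consumers show in the log.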
2023-08-25 13:57:13,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,258 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376118 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,280 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,280 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,280 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,280 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,280 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,281 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:13,281 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:13,281 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,281 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2153, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,351 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,351 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=723, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,351 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,351 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental 
fetch (sessionId=2003280166, epoch=715) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376025 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2156, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=715, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=717) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147375002 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,352 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=724, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=717, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376024 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,352 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147375001 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request 
with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2154, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=716) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376008 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2157, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=716, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147376008 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2145, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), 
implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2146, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 
(id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position 
FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
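Every FETCH request in this log is sent with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), i.e. the Kafka consumer defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. A minimal sketch of a consumer configured the same way follows; the broker addresses, group id, client id and topic name are taken or inferred from the log, the deserializers are assumptions, and this is a plain KafkaConsumer shown for illustration rather than the internal consumer Kafka Connect actually uses for its naument-offset topic.

// Minimal sketch (assumptions noted above): a consumer whose FETCH requests would carry the same
// parameters seen in this log. All four fetch-related settings are the Kafka defaults.
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class NaumentOffsetsConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");                       // group id from the log
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets");             // client id from the log
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());   // assumption
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); // assumption
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                  // minBytes=1 in the FETCH requests
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);              // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);           // maxBytes=52428800 (50 MiB)
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0 -> READ_UNCOMMITTED

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Topic name inferred from the partition names above (naument-offset-0 ... naument-offset-24).
            consumer.subscribe(Collections.singletonList("naument-offset"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("%s-%d@%d%n", record.topic(), record.partition(), record.offset());
            }
        }
    }
}

Because these are the defaults, the same request parameters would appear even if none of the fetch.* settings were configured explicitly.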
2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=712) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=712) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147461836 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2148, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=712, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,420 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2149, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=712, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, 
groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,420 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147461836 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,649 DEBUG || [Consumer 
clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2147, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, 
offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=713) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147461607 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,649 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2150, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=713, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,649 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,650 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147461607 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,692 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:13,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:13,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=216, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,692 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,692 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, 
apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=216, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,692 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:13,692 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,692 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,692 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,692 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=215, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=169) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,752 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2940 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=217, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=169, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,752 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,752 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2940 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2155, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=714) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375617 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2158, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=714, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
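The io.debezium.connector.base.ChangeEventQueue lines below repeat the same two DEBUG messages in a tight loop: the oracle-connector-task is polling Debezium's in-memory change-event queue, finds no records (or fewer than the configured batch size), and backs off briefly before checking again. The following is a simplified sketch of that poll-and-back-off pattern, not Debezium's actual ChangeEventQueue implementation; the queue bound, batch size and poll interval are assumptions standing in for the connector's max.queue.size, max.batch.size and poll.interval.ms settings.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Simplified stand-in for an in-memory change-event queue: a batch is returned once it
// fills up or the poll interval elapses; otherwise the consumer keeps re-checking.
public class ChangeEventQueueSketch<T> {
    private final BlockingQueue<T> queue = new ArrayBlockingQueue<>(8192); // cf. max.queue.size (assumed default)
    private final int maxBatchSize = 2048;                                 // cf. max.batch.size (assumed default)
    private final long pollIntervalMs = 500;                               // cf. poll.interval.ms (assumed default)

    // Producer side: the streaming loop enqueues captured change events.
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Consumer side: mirrors the "checking for more records..." /
    // "no records available or batch size not reached yet, sleeping a bit..." pair.
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(pollIntervalMs);
        while (batch.size() < maxBatchSize && System.nanoTime() < deadline) {
            T event = queue.poll(25, TimeUnit.MILLISECONDS); // "checking for more records..."
            if (event == null) {
                continue; // "no records available or batch size not reached yet, sleeping a bit..."
            }
            batch.add(event);
        }
        return batch;
    }
}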
2023-08-25 13:57:13,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375617 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,781 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
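The repeated "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." pairs above are the Debezium ChangeEventQueue poll loop idling while the vk_nau27 Oracle connector task has nothing buffered. As a rough illustration only (not Debezium's actual implementation), a minimal sketch of that drain-or-sleep pattern is shown below; the class name PollLoopSketch and the capacity, batch-size, poll-interval and sleep values are assumptions made for the example.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

/**
 * Illustrative sketch of the poll/sleep pattern suggested by the
 * ChangeEventQueue DEBUG messages above. This is NOT Debezium's code;
 * the queue capacity, batch size and sleep interval are made-up values.
 */
public class PollLoopSketch {
    private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(8192);
    private final int maxBatchSize = 2048;
    private final long pollIntervalMs = 500;
    private final long sleepMs = 20;

    /** Drains up to maxBatchSize records, sleeping briefly while the queue stays empty. */
    public List<String> poll() throws InterruptedException {
        List<String> batch = new ArrayList<>();
        long deadline = System.currentTimeMillis() + pollIntervalMs;
        while (batch.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
            // "checking for more records..."
            queue.drainTo(batch, maxBatchSize - batch.size());
            if (batch.size() < maxBatchSize) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                TimeUnit.MILLISECONDS.sleep(sleepMs);
            }
        }
        return batch;
    }

    /** Producer side: the connector thread would enqueue change events here. */
    public void enqueue(String record) throws InterruptedException {
        queue.put(record);
    }
}

The loop ends either when a batch fills or when the poll interval elapses, which is why an idle connector emits the check/sleep pair continuously at DEBUG level.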
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,782 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:13,782 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:13,782 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,782 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2156, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=716) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375524 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2159, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=716, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=724, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous 
request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375524 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=718) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147374501 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,852 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=725, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=718, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,852 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147374501 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2157, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,868 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,868 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,868 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,868 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=717) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,868 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375508 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2160, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=717, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,868 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,868 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,868 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375508 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,920 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2149, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,920 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,920 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,920 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node 
broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=713) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147461336 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2151, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=713, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147461335 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2148, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position 
FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 
(id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=713) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147461335 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:13,921 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2152, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=713, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,921 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:13,922 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147461335 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2150, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=714) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,150 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147461106 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,150 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2153, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=714, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,151 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147461106 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,252 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=217, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,252 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,252 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,252 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
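The 13:57:14,252 entries above show the vk_nau27_sink task's consumer working through one incremental-fetch cycle against broker3 for vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at offset 2001. For reference, the sketch below is a minimal standalone consumer (not Kafka Connect's own code) pointed at the same brokers and topic with the same read_uncommitted isolation visible in these requests; the group id and client id are invented for the example. With the org.apache.kafka.clients loggers raised to TRACE, a client like this should emit the same kind of AbstractFetch / FetchSessionHandler chatter.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class NauTopicTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker addresses as they appear in the log (assumption: reachable from this host).
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        // Hypothetical group/client ids for this ad-hoc check; the sink task in the log
        // uses group connect-vk_nau27_sink and client connector-consumer-vk_nau27_sink-0.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "vk_nau27_tail");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "vk_nau27_tail-0");
        // Matches the READ_UNCOMMITTED isolation level of the fetch requests above.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Topic name taken from the fetch log lines above.
            consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
            while (true) {
                // Each poll drives the same fetch machinery (AbstractFetch, FetchSessionHandler)
                // that produces the DEBUG/TRACE lines in this log.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> r : records) {
                    System.out.printf("partition=%d offset=%d value=%d bytes%n",
                            r.partition(), r.offset(), r.value() == null ? 0 : r.value().length);
                }
            }
        }
    }
}
```

The Duration passed to poll() is a client-side wait and is unrelated to the maxWaitMs=500 in the FETCH requests above, which comes from the consumer's fetch.max.wait.ms setting on the broker-facing request.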
2023-08-25 13:57:14,252 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=170) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,252 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,252 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,252 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2440 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,252 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=218, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=170, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,252 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,252 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2440 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,259 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2158, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,259 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,259 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack:
null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=715) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,259 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375117 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,259 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2161, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=715, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,259 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,259 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375117 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,282 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
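The run of identical 13:57:14,282 entries above is the Debezium task's poll loop spinning on the connector's in-memory change event queue, which the Oracle streaming side has not produced into yet: each pass checks the queue, finds neither records nor a full batch, and sleeps briefly. The toy model below is a simplified sketch of that check-then-sleep pattern under stated assumptions; it is not Debezium's actual io.debezium.connector.base.ChangeEventQueue, and the class and parameter names are hypothetical.

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

/**
 * Toy model of the pattern behind the repeated "checking for more records..." /
 * "no records available or batch size not reached yet, sleeping a bit..." lines:
 * a consuming thread drains a shared queue until either a full batch is collected
 * or a poll interval elapses. Not Debezium's real ChangeEventQueue.
 */
public class ToyChangeEventQueue<T> {
    private final Queue<T> queue = new ArrayDeque<>();
    private final int maxBatchSize;
    private final long pollIntervalMs;

    public ToyChangeEventQueue(int maxBatchSize, long pollIntervalMs) {
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    /** Called by the producing (streaming) thread. */
    public synchronized void enqueue(T event) {
        queue.add(event);
        notifyAll();
    }

    /** Called by the task's poll loop; mirrors the checking/sleeping log pattern. */
    public synchronized List<T> poll() throws InterruptedException {
        long deadline = System.currentTimeMillis() + pollIntervalMs;
        List<T> batch = new ArrayList<>();
        while (true) {
            // "checking for more records...": drain whatever is buffered, up to the batch size.
            while (!queue.isEmpty() && batch.size() < maxBatchSize) {
                batch.add(queue.remove());
            }
            long remaining = deadline - System.currentTimeMillis();
            if (batch.size() >= maxBatchSize || remaining <= 0) {
                return batch; // may be empty, like the "About to send 0 records" case below
            }
            // "no records available or batch size not reached yet, sleeping a bit..."
            wait(Math.min(remaining, 50));
        }
    }
}
```

Because the queue stays empty in this stretch of the log, every cycle ends in the sleeping branch, and the WorkerSourceTask entries that follow report "About to send 0 records to Kafka" before polling the source again.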
2023-08-25 13:57:14,283 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:14,283 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:14,283 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,283 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2159, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null)
has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=717) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=725, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375023 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2162, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=717, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 
rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375023 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=719) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147374000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,353 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=726, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=719, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,353 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147374000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2160, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, 
sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=718) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375008 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,368 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2163, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=718, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,368 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147375008 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2151, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), 
implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: 
null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node 
broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=714) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,421 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147460835 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,421 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2154, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=714, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147460834 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, 
clientId=naument--offsets, correlationId=2152, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, 
currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=714) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147460834 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,422 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2155, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=714, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,422 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147460834 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2153, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
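The TRACE and DEBUG records above are routine fetch bookkeeping rather than errors: AbstractFetch allows only one in-flight FETCH per broker, so every partition whose leader already has a pending request is skipped, while FetchSessionHandler keeps an incremental fetch session per node (sessionId/epoch) so that empty responses (responses=[]) merely reconfirm the implied partitions. If this verbosity is unwanted, the offending loggers can be turned down on a running Kafka Connect worker through the admin REST endpoint introduced by KIP-495 (PUT /admin/loggers/<logger>). A minimal sketch, assuming the worker's REST listener is reachable at the hypothetical address http://localhost:8083:

# Sketch (assumption: Connect REST listener at http://localhost:8083): raise the
# log level of the chatty Kafka client loggers at runtime via PUT /admin/loggers.
import json
import urllib.request

WORKER = "http://localhost:8083"  # hypothetical address of the Connect worker

def set_logger_level(logger_name: str, level: str) -> None:
    # PUT /admin/loggers/<logger> with a JSON body {"level": "<level>"}.
    body = json.dumps({"level": level}).encode("utf-8")
    req = urllib.request.Request(
        f"{WORKER}/admin/loggers/{logger_name}",
        data=body,
        headers={"Content-Type": "application/json"},
        method="PUT",
    )
    with urllib.request.urlopen(req) as resp:
        # Connect replies with the list of logger names that were adjusted.
        print(resp.status, resp.read().decode("utf-8"))

# Quieten the fetch bookkeeping seen above without restarting the worker.
set_logger_level("org.apache.kafka.clients.consumer.internals", "INFO")
set_logger_level("org.apache.kafka.clients.FetchSessionHandler", "INFO")

This change only affects the running process and is reset on restart; a permanent change belongs in the worker's Log4j configuration instead.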
2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=715) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147460605 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,651 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2156, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=715, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,651 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,652 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,652 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,652 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
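The FETCH requests above are built with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), which match the stock Kafka consumer fetch settings. A minimal sketch of a consumer that sets those knobs explicitly is shown below; the bootstrap address and group id are copied from this log purely for illustration.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchTuningSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Illustrative bootstrap address; the cluster in this log uses broker1/2/3:29092.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // These three map to maxWaitMs=500, minBytes=1 and maxBytes=52428800 in the
        // FetchRequestData above; the values shown are the Kafka defaults.
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");
        // isolationLevel=0 in the request corresponds to read_uncommitted (also the default).
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // ... subscribe and poll as usual ...
        }
    }
}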
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,652 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,652 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,652 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,652 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,652 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,652 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,652 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147460605 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=218, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=171) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,752 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1940 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,752 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=219, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=171, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,752 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,752 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1940 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2161, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=716) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,759 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374617 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,759 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2164, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=716, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,760 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,760 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,760 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
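The naument--offsets, naument--configs and naument--statuses clients appear to be the Connect worker's internal readers of its offset, config and status storage topics, and their "Polling for fetches with timeout 21473..." lines indicate polling with an effectively unbounded timeout (close to Integer.MAX_VALUE ms), with the per-fetch wait still bounded by fetch.max.wait.ms. A hypothetical tail-reading loop along those lines (not the worker's actual implementation) could look like this:

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class StatusTopicTailSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Assign two of the status partitions seen above; a real worker reads all of them.
            consumer.assign(List.of(new TopicPartition("naument-status", 0),
                                    new TopicPartition("naument-status", 1)));
            consumer.seekToBeginning(consumer.assignment());
            while (true) {
                // A very long poll timeout produces the "Polling for fetches with timeout 21473..."
                // style TRACE lines; each network fetch still returns within fetch.max.wait.ms (500 ms).
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(Integer.MAX_VALUE));
                for (ConsumerRecord<byte[], byte[]> r : records) {
                    // apply the record to in-memory state ...
                }
            }
        }
    }
}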
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,760 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:14,760 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:14,760 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374616 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:14,783 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:14,783 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:14,783 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:14,783 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:14,783 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:14,783 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:14,784 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:14,784 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:14,784 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:14,784 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:14,784 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
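The repeated "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit..." pairs come from Debezium's ChangeEventQueue, which buffers events produced by the Oracle streaming thread and hands them to the source task in batches. Below is a much-simplified, hypothetical sketch of that hand-off (not Debezium's actual implementation), with sizes and intervals chosen to mirror the max.queue.size, max.batch.size and poll.interval.ms defaults; when the returned batch is empty, the Connect side logs the "About to send 0 records to Kafka" line seen above.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class ChangeEventQueueSketch<T> {
    private final BlockingQueue<T> queue = new LinkedBlockingQueue<>(8192); // cf. max.queue.size
    private final int maxBatchSize = 2048;   // cf. max.batch.size
    private final long pollIntervalMs = 500; // cf. poll.interval.ms

    // Called by the streaming thread for every captured change event.
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Called by the source task; mirrors the "checking for more records..." /
    // "no records available or batch size not reached yet, sleeping a bit..." loop.
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(pollIntervalMs);
        while (batch.size() < maxBatchSize && System.nanoTime() < deadline) {
            T event = queue.poll(50, TimeUnit.MILLISECONDS); // "sleeping a bit" between checks
            if (event != null) {
                batch.add(event);
                queue.drainTo(batch, maxBatchSize - batch.size());
            }
        }
        return batch; // may be empty, which yields "About to send 0 records to Kafka"
    }
}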
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2162, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=726, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
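Almost everything in this capture is TRACE/DEBUG output from the consumer internals and the Debezium queue. If the goal is simply to quieten it, the levels for those categories can be raised at runtime through Kafka Connect's PUT /admin/loggers/{logger} endpoint. A sketch with Java's built-in HTTP client follows, assuming the worker's REST listener is reachable at http://localhost:8083 (substitute the real host/port, or the admin.listeners address if one is configured).

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RaiseLogLevel {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Assumed worker address; adjust to the actual REST (or admin) listener.
        String base = "http://localhost:8083/admin/loggers/";
        for (String logger : new String[] {
                "org.apache.kafka.clients.consumer.internals.AbstractFetch",
                "io.debezium.connector.base.ChangeEventQueue"}) {
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create(base + logger))
                    .header("Content-Type", "application/json")
                    .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                    .build();
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(logger + " -> " + response.statusCode());
        }
    }
}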
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,853 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=720) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=718) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,854 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,853 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,854 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,854 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147373500 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374523 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,854 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=727, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=720, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,854 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2165, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=718, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,854 TRACE || [Consumer clientId=naument--statuses, 
groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,854 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,854 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147373499 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,854 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374522 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2163, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,868 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,868 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=719) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,869 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374508 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,869 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2166, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=719, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 TRACE || [Consumer clientId=naument--statuses, 
groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,869 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374507 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2155, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2154, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer 
clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 
DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous 
request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=715) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=715) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147460333 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2157, 
headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=715, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,923 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2158, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=715, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:14,923 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147460333 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,095 TRACE || com.mchange.v2.async.ThreadPoolAsynchronousRunner$DeadlockDetector@26cb9b7a -- Running DeadlockDetector[Exiting. No pending tasks.] [com.mchange.v2.async.ThreadPoolAsynchronousRunner] 2023-08-25 13:57:15,152 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2156, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,152 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,152 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,152 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,152 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,152 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, 
offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 
3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=716) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147460104 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,153 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2159, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=716, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,153 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147460103 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,253 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=219, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,253 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,253 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,253 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,253 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=172) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,253 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,253 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,253 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1439 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,253 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=220, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=172, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,253 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,253 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1439 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,260 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2164, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,260 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,260 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,260 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,260 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,260 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,260 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,260 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,260 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=717) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,260 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,260 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,260 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374116 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,260 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2167, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=717, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,261 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,261 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,261 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
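The naument--offsets and naument--statuses consumers above appear to be the Connect worker's internal readers of its offset and status topics (group naument), and the FetchRequestData fields they send (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, i.e. READ_UNCOMMITTED) correspond to the client-side defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. A minimal sketch of a consumer configured with exactly those values, e.g. to tail the status topic while reproducing the fetch behaviour seen here; the bootstrap servers are taken from the log, while the class name, group id and the topic name naument-status (inferred from the naument-status-N partitions) are assumptions:

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class StatusTopicTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list as it appears in the log above.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-log-inspection"); // hypothetical group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // These mirror the logged FetchRequestData fields
        // (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0).
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status")); // topic name inferred from the partition names
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            System.out.printf("fetched %d records%n", records.count());
        }
    }
}
```

The incremental-fetch bookkeeping seen in the log (sessionId/epoch, "Added (), altered (), removed (), replaced ()") is maintained inside the client; nothing in this sketch has to manage it.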
2023-08-25 13:57:15,261 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:15,261 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:15,261 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374116 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
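The DEBUG pair from io.debezium.connector.base.ChangeEventQueue ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") repeats many dozens of times within this same millisecond in the original capture: the queue the task drains is empty (or the batch threshold has not been reached), so the consuming loop keeps waking up, finding nothing, and going back to sleep. In Debezium this behaviour is governed by max.batch.size, max.queue.size and poll.interval.ms. The sketch below is an illustration only, not Debezium's implementation: a bounded queue drained in timed batches that produces the same "check, find nothing, sleep" pattern.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

/** Illustration only -- a bounded queue drained in timed batches, not Debezium's ChangeEventQueue. */
public class BatchPollLoop<T> {
    private final BlockingQueue<T> queue;
    private final int maxBatchSize;
    private final long pollIntervalMs;

    public BatchPollLoop(int maxQueueSize, int maxBatchSize, long pollIntervalMs) {
        this.queue = new ArrayBlockingQueue<>(maxQueueSize); // cf. max.queue.size
        this.maxBatchSize = maxBatchSize;                    // cf. max.batch.size
        this.pollIntervalMs = pollIntervalMs;                // cf. poll.interval.ms
    }

    /** Producer side: blocks when the queue is full, applying back-pressure to the source. */
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    /** Consumer side: "checking for more records..." */
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        T first = queue.poll(pollIntervalMs, TimeUnit.MILLISECONDS);
        if (first == null) {
            // "no records available or batch size not reached yet, sleeping a bit..."
            return batch; // empty batch; the caller loops and tries again
        }
        batch.add(first);
        queue.drainTo(batch, maxBatchSize - 1); // take whatever else is already buffered
        return batch;
    }
}
```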
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,284 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,285 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:15,285 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:15,285 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,285 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
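The 13:57:15,284 burst above is the Debezium source task idling: ChangeEventQueue keeps reporting that no change events are buffered and the configured batch size has not been reached, so the WorkerSourceTask polls again and hands 0 records to Kafka Connect. As a minimal illustration only (not Debezium's actual implementation), the Java sketch below shows the same poll-with-timeout pattern on a java.util.concurrent.BlockingQueue; the class name, batch size and timeout are assumptions that loosely mirror Debezium's max.batch.size and poll.interval.ms connector settings.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Illustrative sketch of the poll loop behaviour logged above: drain up to one
// batch of buffered events if any are available, otherwise return an empty batch
// so the caller sleeps briefly and polls again.
public class PollLoopSketch {
    // Hypothetical stand-ins for Debezium's max.batch.size / poll.interval.ms.
    private static final int MAX_BATCH_SIZE = 2048;
    private static final long POLL_INTERVAL_MS = 500;

    private final BlockingQueue<String> queue = new LinkedBlockingQueue<>(8192);

    public List<String> poll() throws InterruptedException {
        List<String> batch = new ArrayList<>(MAX_BATCH_SIZE);
        // "checking for more records..."
        String first = queue.poll(POLL_INTERVAL_MS, TimeUnit.MILLISECONDS);
        if (first == null) {
            // "no records available or batch size not reached yet, sleeping a bit..."
            return batch; // empty batch -> "About to send 0 records to Kafka"
        }
        batch.add(first);
        // Top the batch up to the limit without blocking.
        queue.drainTo(batch, MAX_BATCH_SIZE - 1);
        return batch;
    }
}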
2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2165, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=719) for node 1.
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=727, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374022 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2168, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=719, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous 
request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374022 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=721) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,354 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147372999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,354 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=728, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=721, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,355 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,355 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147372998 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,369 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2166, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,369 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,369 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,369 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,369 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,369 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,369 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,369 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,369 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=720) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,369 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,369 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,369 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374007 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,369 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2169, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=720, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,370 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,370 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,370 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,370 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,370 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,370 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147374007 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,423 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2158, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,423 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,423 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,423 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node 
broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=716) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147459833 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2160, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=716, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147459832 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2157, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position 
FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 
(id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=716) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147459832 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,424 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2161, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=716, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,424 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147459832 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2159, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=717) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,653 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147459603 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,653 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2162, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=717, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
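The FETCH request logged here carries maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), which on the consumer side correspond to fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level. The sketch below builds a stand-alone consumer with the same fetch settings to read the naument-offset topic seen in these entries; the bootstrap servers, group id and deserializers are assumptions for illustration only.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class OffsetsTopicReader {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Broker list is an assumption; the log only shows broker1..broker3:29092.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument-inspect");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            // These mirror the values visible in FetchRequestData above.
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-offset")); // the Connect offsets topic in these entries
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
                records.forEach(r -> System.out.printf("%s-%d@%d: %d value bytes%n",
                        r.topic(), r.partition(), r.offset(), r.value() == null ? 0 : r.value().length));
            }
        }
    }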
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,654 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147459602 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,655 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:15,657 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:15,657 TRACE Oracle|vk_nau27|streaming Current time 1692961035657 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:15,657 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:15,658 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 2800. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:15,658 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599283 as end SCN. 
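The "Updating sleep time window. Sleep time 2800. Min sleep time 0. Max sleep time 3000." DEBUG entry shows the streaming loop tuning its pause between mining iterations inside a configured min/max window. One plausible shape of such a self-tuning pause is sketched below; the increment value and the exact adjustment rule are assumptions, not Debezium's actual code.

    // Illustration only: back off while mining batches come back empty, speed up
    // again as soon as changes start arriving, clamped to the configured window.
    public class SleepTimeWindow {
        private final long minMs;
        private final long maxMs;
        private final long incrementMs; // assumed step size
        private long currentMs;

        public SleepTimeWindow(long minMs, long maxMs, long defaultMs, long incrementMs) {
            this.minMs = minMs;
            this.maxMs = maxMs;
            this.currentMs = defaultMs;
            this.incrementMs = incrementMs;
        }

        // Called after each mining iteration.
        public long update(boolean batchHadChanges) {
            if (batchHadChanges) {
                currentMs = Math.max(minMs, currentMs - incrementMs);
            } else {
                currentMs = Math.min(maxMs, currentMs + incrementMs);
            }
            return currentMs;
        }
    }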
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:15,658 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:15,659 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599284' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:15,664 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:15,666 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599222, endScn=289931599283, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:15,666 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599223', endScn => '289931599283', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:15,678 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599222, 289931599283] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,716 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:15,716 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:15,717 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=135, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,717 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=135, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,717 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:15,754 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=220, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,754 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,754 DEBUG || [Consumer 
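The sequence above is one complete mining iteration: read the current SCN, bump LAST_SCN in the LOG_MINING_FLUSH table, look up the current redo SEQUENCE#, start a bounded LogMiner session with dbms_logmnr.start_logmnr, and fetch results for the SCN window. The JDBC sketch below reproduces those calls by hand for the same SCN window; the connection URL and credentials are placeholders, and the SELECT on V$LOGMNR_CONTENTS is a simplified stand-in for the connector's own mining query.

    import java.sql.CallableStatement;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;

    public class ManualLogMinerSession {
        public static void main(String[] args) throws Exception {
            long startScn = 289931599222L;  // values taken from the log above
            long endScn   = 289931599283L;

            // URL and credentials are placeholders for illustration.
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:oracle:thin:@//oracle-host:1521/NAUMENT1", "DEBEZIUM", "secret")) {

                // Same PL/SQL block as "Executing statement BEGIN sys.dbms_logmnr.start_logmnr(...)";
                // the connector passes startScn + 1, as in the logged statement.
                String startStmt = "BEGIN sys.dbms_logmnr.start_logmnr("
                        + "startScn => ?, endScn => ?, "
                        + "OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING "
                        + "+ DBMS_LOGMNR.NO_ROWID_IN_STMT); END;";
                try (CallableStatement cs = conn.prepareCall(startStmt)) {
                    cs.setLong(1, startScn + 1);
                    cs.setLong(2, endScn);
                    cs.execute();
                }

                // "Fetching results for SCN [startScn, endScn]" -- simplified query.
                String query = "SELECT SCN, OPERATION, SEG_OWNER, TABLE_NAME, SQL_REDO "
                        + "FROM V$LOGMNR_CONTENTS WHERE SCN > ? AND SCN <= ?";
                try (PreparedStatement ps = conn.prepareStatement(query)) {
                    ps.setLong(1, startScn);
                    ps.setLong(2, endScn);
                    try (ResultSet rs = ps.executeQuery()) {
                        while (rs.next()) {
                            System.out.printf("%d %s %s.%s%n", rs.getLong("SCN"),
                                    rs.getString("OPERATION"), rs.getString("SEG_OWNER"),
                                    rs.getString("TABLE_NAME"));
                        }
                    }
                }

                // End the mining session when done.
                try (CallableStatement end = conn.prepareCall("BEGIN sys.dbms_logmnr.end_logmnr; END;")) {
                    end.execute();
                }
            }
        }
    }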
clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,754 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,754 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=173) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,754 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,754 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,754 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 938 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,754 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=221, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=173, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,754 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,754 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 938 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,761 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2167, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,761 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with 
response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,761 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=718) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,761 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373615 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,761 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2170, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=718, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,761 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,761 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373615 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,781 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599231, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:12Z, transactionId='04001900a2d52a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', 
rollbackFlag=false, rsId=0x0098e8.0002bd5a.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 04001900a2d52a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599234, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:12Z, transactionId='06001700920b2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd60.0144, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 06001700920b2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599241, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='0c001a00555b2b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd78.00ec, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 0c001a00555b2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. 
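The repeated "Skipping change associated with table ... which does not match filters." entries show the include-list filter discarding redo for tables the connector does not capture (the QRTZ_* and TBL_SYS_* tables here), while changes for the captured table reach the topic vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27 consumed by the sink. A sketch of that kind of regex include filter follows; the include pattern is inferred from the sink topic name and is an assumption, not the connector's actual table.include.list value.

    import java.util.List;
    import java.util.regex.Pattern;

    // Illustration of an include-list style table filter like the one producing the
    // "does not match filters" TRACE lines; not Debezium's own filter classes.
    public class TableIncludeFilter {
        private final List<Pattern> includes;

        public TableIncludeFilter(List<String> includePatterns) {
            this.includes = includePatterns.stream()
                    .map(p -> Pattern.compile(p, Pattern.CASE_INSENSITIVE))
                    .toList();
        }

        public boolean isIncluded(String schemaDotTable) {
            return includes.stream().anyMatch(p -> p.matcher(schemaDotTable).matches());
        }

        public static void main(String[] args) {
            TableIncludeFilter filter = new TableIncludeFilter(List.of("DEBEZIUM\\.GBC_TBL_SERVICECALL_NC27"));
            System.out.println(filter.isIncluded("DEBEZIUM.GBC_TBL_SERVICECALL_NC27")); // true  -> processed
            System.out.println(filter.isIncluded("NAUSD40.QRTZ_FIRED_TRIGGERS"));       // false -> "Skipping change ..."
        }
    }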
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599244, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='0900040069ef2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd7b.0144, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 0900040069ef2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599247, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='08001800d9602e00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bd89.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 08001800d9602e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599250, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='01000200b0b82500', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bda0.0028, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 01000200b0b82500 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931599252) LogMinerEventRow{scn=289931599253, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='1000210090352a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bdb0.01d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 1000210090352a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599256, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='0b002100fd0f2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bdb2.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 0b002100fd0f2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SEQUENCE' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599259, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='0600100029de2900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bdb4.00d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 0600100029de2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_SEQUENCE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931599261) LogMinerEventRow{scn=289931599262, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='13000e0023b62b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bdbc.00fc, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 13000e0023b62b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599265, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='0f001c0062f12900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bdbd.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 0f001c0062f12900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_MAIL_LOG_RECORD' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599268, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='11000d0045282b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bdc4.003c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 11000d0045282b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599271, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='0a002000c61b3100', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bdd4.0028, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 0a002000c61b3100 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599274, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='0e001200dc3d2900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bde4.00b0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 0e001200dc3d2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599277, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='02001200a3ce2700', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002bdf2.00d0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 02001200a3ce2700 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599280, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='05001b00d3712b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be01.013c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,782 TRACE Oracle|vk_nau27|streaming Transaction 05001b00d3712b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,783 DEBUG Oracle|vk_nau27|streaming Counters{rows=63, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=16, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,783 DEBUG Oracle|vk_nau27|streaming Processed in 1 ms. Lag: 782. Offset SCN: 289931599222, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599280, txIds=[05001b00d3712b00]}}], Active Transactions: 0, Sleep: 2800 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:15,783 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:15,785 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,785 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,786 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:15,786 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:15,786 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,786 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
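The checking/sleeping pair above is Debezium's ChangeEventQueue poll loop: the source task keeps asking the in-memory queue for a batch, and as long as nothing has arrived (or the batch is not yet full) it sleeps briefly and retries, while the Connect WorkerSourceTask sees an empty poll and immediately polls the source again. This chatter can be silenced by raising the io.debezium.connector.base.ChangeEventQueue logger above DEBUG in the worker's log4j configuration. Below is an illustrative sketch of the pattern only, not Debezium's actual implementation; the queue capacity, batch size and sleep interval are made-up values standing in for max.queue.size, max.batch.size and poll.interval.ms.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    // Illustrative poll loop in the spirit of the ChangeEventQueue messages above.
    public class PollLoopSketch {
        private static final int MAX_BATCH_SIZE = 2048;   // assumed stand-in for max.batch.size
        private static final long POLL_INTERVAL_MS = 500; // assumed stand-in for poll.interval.ms

        private final BlockingQueue<String> queue = new LinkedBlockingQueue<>(8192);

        // Drain up to a full batch; if nothing is available yet, sleep a bit and retry,
        // which is what produces the repeated DEBUG lines in the log.
        public List<String> poll() throws InterruptedException {
            List<String> batch = new ArrayList<>(MAX_BATCH_SIZE);
            while (batch.isEmpty()) {
                // "checking for more records..."
                queue.drainTo(batch, MAX_BATCH_SIZE);
                if (batch.isEmpty()) {
                    // "no records available or batch size not reached yet, sleeping a bit..."
                    TimeUnit.MILLISECONDS.sleep(POLL_INTERVAL_MS);
                }
            }
            // The WorkerSourceTask then decides whether there is anything to send to Kafka.
            return batch;
        }
    }
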
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:15,787 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:15,788 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:15,790 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:15,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2168, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=720) for node 1. 
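The two statements the streaming thread runs at 13:57:15,787-788 above feed the "Oracle Session UGA ... PGA ..." memory line. They can be issued manually against the same database to watch the LogMiner session's memory from the outside. A rough sketch follows; the JDBC URL and credentials are placeholders, it assumes the Oracle JDBC driver is on the classpath, and the UGA statistic names are the standard v$statname entries rather than values taken from this log.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;

    // Rough sketch: read the same session memory statistics that the streaming thread
    // logs above. Connection URL, user and password are placeholders.
    public class SessionMemoryCheck {
        private static final String STAT_QUERY =
            "SELECT m.VALUE FROM v$statname n, v$mystat m "
          + "WHERE n.name = ? AND n.statistic# = m.statistic#";

        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:oracle:thin:@//dbhost:1521/ORCLPDB", "c##dbzuser", "dbz")) {
                for (String stat : new String[] {
                        "session uga memory", "session uga memory max",
                        "session pga memory", "session pga memory max"}) {
                    try (PreparedStatement ps = conn.prepareStatement(STAT_QUERY)) {
                        ps.setString(1, stat);
                        try (ResultSet rs = ps.executeQuery()) {
                            if (rs.next()) {
                                // Report in MB, matching the units Debezium prints.
                                System.out.printf("%s = %.2f MB%n", stat, rs.getLong(1) / 1048576.0);
                            }
                        }
                    }
                }
            }
        }
    }
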
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373521 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2171, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=720, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373521 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,856 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=728, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,856 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,856 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,856 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,856 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=722) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,856 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,856 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,856 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147372497 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,856 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=729, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=722, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,856 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,856 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147372497 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,871 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2169, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,871 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,871 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=721) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,871 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373505 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,871 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2172, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=721, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,871 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373505 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2160, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2161, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED 
fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] 
Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=717) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=717) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147459331 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2163, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=717, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,925 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2164, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=717, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,925 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE 
|| [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:15,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147459331 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2162, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, 
naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=718) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,154 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147459102 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,154 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2165, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=718, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 
(id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,155 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147459101 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,255 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=221, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 
2023-08-25 13:57:16,255 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,255 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,255 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,255 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=174) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,255 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,255 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,255 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 437 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,255 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=222, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=174, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,255 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,255 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 437 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,262 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, 
clientId=naument--statuses, correlationId=2170, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,262 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,262 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=719) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,262 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373114 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,262 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2173, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=719, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,262 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,262 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373114 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,286 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,286 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,286 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,286 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,287 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,287 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:16,287 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka.
Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:16,287 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,287 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2171, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,354 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,354 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=721) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373022 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,355 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2174, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=721, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,355 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373021 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,356 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=729, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,356 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,356 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,356 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,356 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=723) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,356 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,356 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,356 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147371997 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,356 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=730, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=723, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,356 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,356 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147371997 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,371 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2172, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,371 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,371 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
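
Every FETCH request logged in this section carries the same parameters: maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0 (READ_UNCOMMITTED) and a 30000 ms request timeout, and the responses come back with responses=[], i.e. no new data, so the broker simply held each request for up to 500 ms and answered with an empty incremental fetch. Those values are the Kafka consumer defaults; the short Java sketch below only maps the logged fields onto the corresponding consumer settings (broker addresses and group id are taken from the log, nothing else is implied about this worker's actual configuration).

import java.util.Properties;

// The consumer settings that correspond to the FetchRequestData fields seen above.
// All five values are the Kafka defaults.
public class FetchDefaults {
    static Properties consumerProps() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
        props.put("group.id", "naument");                  // groupId=naument
        props.put("fetch.max.wait.ms", "500");             // maxWaitMs=500
        props.put("fetch.min.bytes", "1");                 // minBytes=1
        props.put("fetch.max.bytes", "52428800");          // maxBytes=52428800 (50 MiB)
        props.put("isolation.level", "read_uncommitted");  // isolationLevel=0 / READ_UNCOMMITTED
        props.put("request.timeout.ms", "30000");          // "... and timeout 30000 to node ..."
        return props;
    }

    public static void main(String[] args) {
        System.out.println(consumerProps());
    }
}
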
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=722) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,371 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373005 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,371 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2175, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=722, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,371 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,371 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,372 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,372 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147373005 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,425 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2164, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,425 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,425 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,425 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node 
broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=718) for node 2. 
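
The three consumer client ids that dominate this section (naument--offsets, naument--configs, naument--statuses) are the Connect worker's readers for its internal storage topics; the partitions they fetch (naument-offset-0..24, naument-config-0, naument-status-0..4) identify those topics. The Java sketch below spells out the distributed-worker settings this implies: the group id and topic names are taken directly from the log, the partition counts are the stock Connect defaults, and the rest of the worker configuration is omitted.

import java.util.Properties;

// Distributed-worker settings implied by the internal-topic consumers in this log.
public class WorkerStorageTopics {
    static Properties workerProps() {
        Properties p = new Properties();
        p.put("group.id", "naument");                    // groupId=naument on every consumer
        p.put("offset.storage.topic", "naument-offset"); // naument-offset-0 .. naument-offset-24
        p.put("offset.storage.partitions", "25");        // Connect default: 25
        p.put("config.storage.topic", "naument-config"); // naument-config-0 (always one partition)
        p.put("status.storage.topic", "naument-status"); // naument-status-0 .. naument-status-4
        p.put("status.storage.partitions", "5");         // Connect default: 5
        return p;
    }

    public static void main(String[] args) {
        System.out.println(workerProps());
    }
}
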
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147458831 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2166, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=718, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2163, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position 
FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 
(id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=718) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147458830 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,426 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2167, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=718, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,426 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147458830 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2165, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
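
Almost everything in this stretch is routine consumer-internals DEBUG/TRACE output: the consumer keeps at most one outstanding fetch per broker, so AbstractFetch skips every partition whose leader already has a pending request, and FetchSessionHandler keeps building empty incremental fetches because no new data is arriving. Once this level of detail is no longer needed for debugging, the noisy loggers can be turned back down without restarting the worker, for example via Connect's dynamic log-level REST endpoint (PUT /admin/loggers/{name}, available in recent Connect releases). A sketch under assumptions: the worker URL below is hypothetical, and the same change can instead be made permanently in the worker's log4j configuration.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Raises org.apache.kafka.clients (NetworkClient, FetchSessionHandler, AbstractFetch,
// KafkaConsumer) back to INFO at runtime through the Connect REST API.
public class QuietFetchLogging {
    public static void main(String[] args) throws Exception {
        HttpClient http = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8083/admin/loggers/org.apache.kafka.clients"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                .build();
        HttpResponse<String> response = http.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
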
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=719) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147458601 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,655 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2168, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=719, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,655 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,656 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147458601 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,693 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:16,693 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:16,693 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=223, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,693 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,693 
TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,693 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=223, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,693 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:16,693 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,693 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,693 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,693 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,756 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=222, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,756 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,756 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,756 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,756 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=175) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,756 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,756 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,756 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2937 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,756 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=224, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=175, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,756 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,756 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2937 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,763 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2173, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,763 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,763 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=720) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,763 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372613 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,763 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2176, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=720, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,763 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
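The FETCH requests in the entries above are sent with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), while the sink task's consumer keeps its group membership alive through the Heartbeat exchange with the coordinator. A minimal consumer sketch with the matching client-side settings follows; the bootstrap servers, group id, client id and topic are taken from the log, and everything else (the class name, the poll handling) is illustrative rather than the connector's actual code.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class Nau27SinkConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list, group id and client id as they appear in the log entries above.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "connect-vk_nau27_sink");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "connector-consumer-vk_nau27_sink-0");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        // Client-side equivalents of the logged FetchRequestData fields.
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");            // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");         // maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0
        // heartbeat.interval.ms defaults to 3000 ms, consistent with the Heartbeat timer above.
        props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "3000");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
            while (true) {
                // poll() fetches records; group heartbeats run on the consumer's background thread.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(3));
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    System.out.printf("partition=%d offset=%d%n", record.partition(), record.offset());
                }
            }
        }
    }
}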
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,763 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372613 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,787 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,788 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,788 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:16,788 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:16,788 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,788 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
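The ChangeEventQueue entries above trace the source task's polling loop: the streaming thread enqueues change events while the task thread repeatedly checks the queue, sleeps briefly when no records are available or the batch is not yet full, and hands a batch to WorkerSourceTask when poll() returns. A rough, self-contained sketch of that pattern follows; it is plain Java illustrating the cycle the messages describe, not Debezium's actual ChangeEventQueue, and the queue sizes, sleep time and class name are placeholders.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Illustrative bounded queue polled in batches, mirroring the
// "checking for more records... / sleeping a bit..." cycle in the log.
public class ChangeEventQueueSketch<T> {
    private final BlockingQueue<T> queue;
    private final int maxBatchSize;
    private final long pollTimeoutMs;

    public ChangeEventQueueSketch(int maxQueueSize, int maxBatchSize, long pollTimeoutMs) {
        this.queue = new LinkedBlockingQueue<>(maxQueueSize);
        this.maxBatchSize = maxBatchSize;
        this.pollTimeoutMs = pollTimeoutMs;
    }

    // Producer side: the streaming thread enqueues events, blocking when the queue is full.
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Consumer side: drain a batch, sleeping while no records are available
    // or the batch size has not been reached, until the poll timeout expires.
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.currentTimeMillis() + pollTimeoutMs;
        while (batch.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
            // "checking for more records..."
            queue.drainTo(batch, maxBatchSize - batch.size());
            if (batch.size() < maxBatchSize) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                Thread.sleep(50);
            }
        }
        return batch;
    }
}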
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:16,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2174, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=722) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372521 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,855 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2177, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=722, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,855 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372521 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,857 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=730, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,857 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,857 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,857 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,857 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=724) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,857 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,857 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,857 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147371496 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,857 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=731, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=724, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,857 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,857 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147371496 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,872 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2175, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,872 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,872 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
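The naument--offsets, naument--configs and naument--statuses consumers in these entries are the Connect worker's readers of its three internal topics, and the partition names seen throughout the log (naument-offset-0 through naument-offset-24, naument-status-0 through naument-status-4, naument-config-0) line up with the usual 25 offset partitions, 5 status partitions and single config partition. A hedged sketch of the distributed-worker settings this implies follows; the values are inferred from the log rather than copied from the real worker configuration, the class name is hypothetical, and required settings such as the key and value converters are omitted.

import java.util.Map;

public class NaumentWorkerConfigSketch {
    // Inferred from the log: group.id "naument" and internal topics named
    // naument-offset (25 partitions), naument-config (1 partition) and
    // naument-status (5 partitions), on brokers broker1/2/3:29092.
    public static final Map<String, String> WORKER_PROPS = Map.of(
            "bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092",
            "group.id", "naument",
            "offset.storage.topic", "naument-offset",
            "offset.storage.partitions", "25",
            "config.storage.topic", "naument-config",
            "status.storage.topic", "naument-status",
            "status.storage.partitions", "5");
}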
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=723) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,872 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372504 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,872 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2178, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=723, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,872 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372504 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:57:16,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2166, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental 
fetch (sessionId=266530269, epoch=719) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,926 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147458330 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,926 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2169, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=719, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2167, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], 
currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 
(id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=719) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147458329 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:16,927 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2170, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=719, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:16,927 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147458329 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2168, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 DEBUG || [Consumer 
clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=720) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147458100 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,156 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2171, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=720, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,156 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,157 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147458100 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,207 DEBUG || Queued change lazy=false Accept@179839be[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016]] on ManagedSelector@5981f2c6{STARTED} id=2 keys=0 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,207 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=0 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,207 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,207 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/0 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || update Accept@179839be[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016]] [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || queue Accept@179839be[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016]] startThread=0 
[org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:17,208 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || run Accept@179839be[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016]] in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:17,208 DEBUG || SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}-> idle timeout check, elapsed: 0 ms, remaining: 30000 ms [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:57:17,208 DEBUG || new HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} -> SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->,null,HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,208 DEBUG || New HTTP Connection HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}-> [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,208 DEBUG || Queued change lazy=true org.eclipse.jetty.io.ManagedSelector$$Lambda$757/0x00000008408db040@1eb8ad4d on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || onOpen SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:17,208 DEBUG || onOpen HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,208 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,208 DEBUG || interested FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@268327b3[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,208 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=0}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,208 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=2 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 2 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || updateable 2 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || update org.eclipse.jetty.io.ManagedSelector$$Lambda$757/0x00000008408db040@1eb8ad4d [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,208 DEBUG || Created SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=0}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || ran Accept@179839be[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016]] in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:17,209 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=0}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,209 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 
1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,209 DEBUG || task CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,209 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:57:17.209491+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,209 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:57:17.209593+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,209 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=19]@2023-08-25T13:57:17.209647+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,209 DEBUG || fillable 
FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,209 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:57:17.209751+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,209 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,209 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,209 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,209 DEBUG || filled 129 HeapByteBuffer@360c19be[p=0,l=129,c=8192,r=129]={<<>>applicati...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,209 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} filled 129 HeapByteBuffer@360c19be[p=0,l=129,c=8192,r=129]={<<>>applicati...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,209 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=129,c=8192,r=129]={<<>>applicati...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 
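The AbstractFetch / FetchSessionHandler / NetworkClient entries above come from the worker's internal consumer (clientId=naument--offsets, groupId=naument) cycling through incremental FETCH sessions against broker1/broker2/broker3 for the 25 partitions of the naument-offset topic. As an illustration only, the following is a minimal, generic KafkaConsumer poll loop in Java whose client internals would emit this kind of TRACE/DEBUG output; it is a sketch, not a reproduction of how Kafka Connect wires up its offsets consumer, and the bootstrap server list and processing code are assumptions taken from the broker names in the log.

// Minimal illustrative sketch (assumptions: broker addresses and topic name as seen in the log).
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetTopicPollSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Brokers as they appear in the log above (assumed resolvable under these names).
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // read_uncommitted matches the READ_UNCOMMITTED fetch requests logged above (it is also the default).
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("naument-offset"));
            while (true) {
                // Each poll() drives the incremental FETCH requests and the
                // "Skipping fetch ... has not been processed" bookkeeping seen in the log.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    System.out.printf("partition=%d offset=%d%n", record.partition(), record.offset());
                }
            }
        }
    }
}
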
2023-08-25 13:57:17,210 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=129,c=8192,r=129]={<<>>applicati...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:User-Agent --> VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:User-Agent --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:User-Agent --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:Host --> VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:Host --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:Host --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:Accept --> VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:Accept --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER:Accept --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,210 DEBUG || REQUEST for //naument:8083/connectors on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} GET //naument:8083/connectors HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,210 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,210 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,210 DEBUG || handle //naument:8083/connectors HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,210 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,210 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,210 DEBUG || REQUEST GET /connectors on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,210 DEBUG || scope null||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,210 DEBUG || context=||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,210 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,210 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,210 DEBUG || servlet ||/connectors -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,210 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,212 DEBUG || write(array HeapByteBuffer@76a12bc9[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,212 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=32 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,212 DEBUG || sendResponse info=null content=HeapByteBuffer@21baeb59[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,212 DEBUG || COMMIT for /connectors on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,212 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6cc4dff5] (null,[p=0,l=32,c=8192,r=32],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,212 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=32} last=true content=HeapByteBuffer@21baeb59[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,212 
DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,212 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,212 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6cc4dff5] ([p=0,l=141,c=8192,r=141],[p=0,l=32,c=8192,r=32],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,212 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>/json\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@21baeb59[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,212 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,212 DEBUG || flushed 173 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,212 DEBUG || Flushed=true written=173 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,212 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,212 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6cc4dff5] ([p=141,l=141,c=8192,r=0],[p=32,l=32,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,212 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,213 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,213 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=1,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,213 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,213 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,213 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,213 DEBUG || 
complete(org.eclipse.jetty.util.Callback$3@71bf8195) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,213 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,213 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,213 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,213 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,213 DEBUG || onCompleted for /connectors written=32 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,213 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors HTTP/1.1" 200 32 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,213 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,213 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,213 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,213 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,213 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,213 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,213 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,213 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,213 DEBUG || 
onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,213 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,213 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,213 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,213 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,213 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,213 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,213 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,213 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,213 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,213 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,214 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,214 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,214 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,214 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,214 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,214 DEBUG || interested FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,214 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,214 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,214 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,214 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,214 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,214 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,214 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,214 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,214 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,214 DEBUG || ReservedThread@2a2521{RUNNING,thread=Thread[qtp974320615-175,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,214 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,214 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,214 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,214 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,214 DEBUG || Queued change lazy=false Accept@316c2787[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020]] on ManagedSelector@6942ee48{STARTED} id=3 keys=0 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=0 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Queued change lazy=false Accept@1e15abac[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032]] on ManagedSelector@2dd08ff1{STARTED} id=0 keys=0 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=0 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/0 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 
13:57:17,215 DEBUG || update Accept@1e15abac[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032]] [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/0 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || queue Accept@1e15abac[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032]] startThread=0 [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:17,215 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || update Accept@316c2787[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020]] [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || run Accept@1e15abac[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032]] in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:17,215 DEBUG || queue Accept@316c2787[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020]] startThread=0 [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:17,215 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || run Accept@316c2787[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020]] in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=0,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:17,215 DEBUG || SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}-> idle timeout check, elapsed: 0 ms, remaining: 30000 ms [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:57:17,215 DEBUG || SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}-> idle timeout check, elapsed: 0 ms, remaining: 30000 ms [org.eclipse.jetty.io.IdleTimeout] 2023-08-25 13:57:17,215 DEBUG || new HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} -> SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->,null,HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,215 DEBUG || new HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} -> 
SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->,null,HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,215 DEBUG || New HTTP Connection HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}-> [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,215 DEBUG || New HTTP Connection HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}-> [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,215 DEBUG || Queued change lazy=true org.eclipse.jetty.io.ManagedSelector$$Lambda$757/0x00000008408db040@55e67276 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || Queued change lazy=true org.eclipse.jetty.io.ManagedSelector$$Lambda$757/0x00000008408db040@382cead9 on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,215 DEBUG || onOpen SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:17,215 DEBUG || onOpen SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractEndPoint] 2023-08-25 13:57:17,215 DEBUG || onOpen HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,215 DEBUG || onOpen HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,215 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,215 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,215 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,215 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=0}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,215 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=0}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,216 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=0}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,216 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=2 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=2 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 
2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 2 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 2 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || updateable 2 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || update org.eclipse.jetty.io.ManagedSelector$$Lambda$757/0x00000008408db040@382cead9 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || updateable 2 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || update org.eclipse.jetty.io.ManagedSelector$$Lambda$757/0x00000008408db040@55e67276 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Created SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=0}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || ran Accept@1e15abac[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032]] in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=0,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:17,216 DEBUG || Created SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=0}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=0}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,216 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Key interests updated 0 -> 1 on 
SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=0}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,216 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || ran Accept@316c2787[java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020]] in QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=1,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}] [org.eclipse.jetty.util.thread.QueuedThreadPool] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,216 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,216 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 
SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=31]@2023-08-25T13:57:17.216536+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,216 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,216 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,216 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=31]@2023-08-25T13:57:17.216585+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,216 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:57:17.216649+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,216 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,216 DEBUG || 
ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=32]@2023-08-25T13:57:17.216764+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,216 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,216 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:57:17.216755+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,216 DEBUG || filled 143 HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,216 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:57:17.216871+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,216 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:57:17.216908+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,216 DEBUG || fillable FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,216 DEBUG || 
EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=32]@2023-08-25T13:57:17.216835+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,217 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,217 DEBUG || filled 142 HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,217 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} filled 142 HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,217 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,217 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:User-Agent --> VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:User-Agent --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:User-Agent --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:Host --> VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:Host --> IN_VALUE 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:Host --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:Accept --> VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:Accept --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER:Accept --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} GET //naument:8083/connectors/vk_nau27_src HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,217 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,217 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,217 DEBUG || handle //naument:8083/connectors/vk_nau27_src HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,216 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,217 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,216 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} filled 143 HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,217 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,217 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,217 DEBUG || 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,217 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,217 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,217 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,218 DEBUG || REQUEST GET /connectors/vk_nau27_src on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,217 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,217 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,218 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || HEADER:User-Agent --> VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || scope null||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,218 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=URI,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=0,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,218 DEBUG || 
HEADER:User-Agent --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || context=||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,218 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,218 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,218 DEBUG || HEADER:User-Agent --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,218 DEBUG || HEADER:Host --> VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || HEADER:Host --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || HEADER:Host --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || HEADER:Accept --> VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || servlet ||/connectors/vk_nau27_src -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,218 DEBUG || HEADER:Accept --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || HEADER:Accept --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,218 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,218 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} GET //naument:8083/connectors/vk_nau27_sink HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,218 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,218 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,218 DEBUG || handle //naument:8083/connectors/vk_nau27_sink 
HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,218 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,218 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,218 DEBUG || REQUEST GET /connectors/vk_nau27_sink on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,218 DEBUG || scope null||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,218 DEBUG || context=||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,218 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,218 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,218 DEBUG || servlet ||/connectors/vk_nau27_sink -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,218 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,219 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,219 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:17,219 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:17,219 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,219 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,219 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,219 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,219 DEBUG || [Worker clientId=connect-1, groupId=naument] 
Scheduled next key rotation at: 1692961731909 (now: 1692961037219 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,219 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,219 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 694690ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,219 DEBUG || write(array HeapByteBuffer@7ab1f661[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,220 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=905 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,220 DEBUG || write(array HeapByteBuffer@3256c219[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,220 DEBUG || sendResponse info=null content=HeapByteBuffer@4dd0f984[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,220 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1528 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,220 DEBUG || COMMIT for /connectors/vk_nau27_sink on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,220 DEBUG || sendResponse info=null content=HeapByteBuffer@64a5aa2b[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,220 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6290f55] (null,[p=0,l=905,c=8192,r=905],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,220 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=905} last=true content=HeapByteBuffer@4dd0f984[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,220 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,220 DEBUG || COMMIT for 
/connectors/vk_nau27_src on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,220 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,220 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6290f55] ([p=0,l=142,c=8192,r=142],[p=0,l=905,c=8192,r=905],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,220 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2b0b65d5] (null,[p=0,l=1528,c=8192,r=1528],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,220 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@4dd0f984[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,220 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,220 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1528} last=true content=HeapByteBuffer@64a5aa2b[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,220 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,220 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,220 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2b0b65d5] ([p=0,l=143,c=8192,r=143],[p=0,l=1528,c=8192,r=1528],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,220 DEBUG || flushed 1047 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,220 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=143,c=8192,r=143]={<<>>cation/js...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@64a5aa2b[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,220 DEBUG || Flushed=true written=1047 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,220 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING 
[org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,220 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,220 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6290f55] ([p=142,l=142,c=8192,r=0],[p=905,l=905,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,220 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,220 DEBUG || flushed 1671 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,220 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,220 DEBUG || Flushed=true written=1671 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,220 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=1,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,220 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,220 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,220 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,220 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2b0b65d5] ([p=143,l=143,c=8192,r=0],[p=1528,l=1528,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,220 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,220 DEBUG || complete(org.eclipse.jetty.util.Callback$3@2681d4f1) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,220 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,220 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 
2023-08-25 13:57:17,220 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,220 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,220 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,220 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,220 DEBUG || onCompleted for /connectors/vk_nau27_sink written=905 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,220 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=1,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,220 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,220 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,220 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_sink HTTP/1.1" 200 905 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,220 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,221 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,221 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,221 DEBUG || complete(org.eclipse.jetty.util.Callback$3@7da6292e) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,221 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,221 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,221 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED 
os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,221 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,221 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,221 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,221 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,221 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,221 DEBUG || onCompleted for /connectors/vk_nau27_src written=1528 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,221 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,221 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,221 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,221 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,221 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_src HTTP/1.1" 200 1528 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,221 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,221 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,221 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,221 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of 
-1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,221 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,221 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,221 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=7/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,221 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,221 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=4} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,221 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,221 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,221 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,221 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=4} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,221 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,221 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,221 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,221 DEBUG || task CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,221 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,221 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,221 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:57:17.221583+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,221 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,221 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,221 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,221 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,221 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} 
task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:57:17.221688+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,221 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,221 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:57:17.221767+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,221 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,221 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,221 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,221 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=7/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,221 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,221 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=1,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,221 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=20]@2023-08-25T13:57:17.221652+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=7/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,221 DEBUG || 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,221 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,222 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,222 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,222 DEBUG || fillable FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=7/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,222 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,222 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,222 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,222 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=7/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,222 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,222 DEBUG || filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,222 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,222 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,222 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,222 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 
2023-08-25 13:57:17,222 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,222 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,222 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,222 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,222 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,222 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,222 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || interested 
FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,222 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,222 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,222 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,222 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,222 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,222 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,222 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,222 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,222 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,222 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,222 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,222 DEBUG || Queued change 
lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,222 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,222 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,222 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,223 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,223 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,223 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,222 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,222 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,223 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,223 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || Key 
interests updated 0 -> 1 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,223 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,223 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,223 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,223 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,223 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/status HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,223 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,223 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,223 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,223 DEBUG || REQUEST GET /connectors/vk_nau27_sink/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,223 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=32]@2023-08-25T13:57:17.223526+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,223 DEBUG || scope null||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,223 DEBUG || context=||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,223 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=32]@2023-08-25T13:57:17.223594+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,223 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,223 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=32]@2023-08-25T13:57:17.223637+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 
2023-08-25 13:57:17,223 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,223 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=33]@2023-08-25T13:57:17.223723+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,223 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,223 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,223 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,223 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,224 DEBUG || servlet ||/connectors/vk_nau27_sink/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,224 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,224 DEBUG || filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>/json\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,224 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>/json\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,225 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>/json\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,225 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>/json\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,225 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/status on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} GET //naument:8083/connectors/vk_nau27_src/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,225 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,225 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,225 DEBUG || handle //naument:8083/connectors/vk_nau27_src/status HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,225 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,225 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,225 DEBUG || REQUEST GET /connectors/vk_nau27_src/status on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,225 DEBUG || scope null||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,225 DEBUG || context=||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,225 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,225 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,225 DEBUG || servlet ||/connectors/vk_nau27_src/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,225 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,225 DEBUG || write(array HeapByteBuffer@76fb343f[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,225 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=169 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,225 DEBUG || sendResponse info=null content=HeapByteBuffer@4c4fb2e[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,225 DEBUG || COMMIT for /connectors/vk_nau27_sink/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true 
al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,225 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7924e191] (null,[p=0,l=169,c=8192,r=169],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,226 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=169} last=true content=HeapByteBuffer@4c4fb2e[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,226 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,226 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,226 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7924e191] ([p=0,l=142,c=8192,r=142],[p=0,l=169,c=8192,r=169],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,226 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>\nson\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@4c4fb2e[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,226 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,226 DEBUG || flushed 311 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,226 DEBUG || Flushed=true written=311 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,226 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,226 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7924e191] ([p=142,l=142,c=8192,r=0],[p=169,l=169,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,226 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,226 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,226 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE 
awp=false se=false i=true al=0},r=2,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,226 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,226 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,226 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,226 DEBUG || complete(org.eclipse.jetty.util.Callback$3@21557f82) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,226 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,226 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,226 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,226 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,226 DEBUG || onCompleted for /connectors/vk_nau27_sink/status written=169 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,226 DEBUG || write(array HeapByteBuffer@a32ff66[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,226 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=170 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,226 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_sink/status HTTP/1.1" 200 169 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,226 DEBUG || sendResponse info=null content=HeapByteBuffer@621d9e[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,226 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,226 DEBUG || COMMIT for /connectors/vk_nau27_src/status on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 
10:57:17 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,226 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,226 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@d5c972e] (null,[p=0,l=170,c=8192,r=170],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,226 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,226 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=170} last=true content=HeapByteBuffer@621d9e[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,227 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,227 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=4} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,227 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,227 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,227 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,227 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,227 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,227 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@d5c972e] ([p=0,l=142,c=8192,r=142],[p=0,l=170,c=8192,r=170],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,227 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,227 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\n/j...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@621d9e[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,227 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,227 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,227 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,227 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,227 DEBUG || flushed 312 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,227 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,227 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,227 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,227 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,227 DEBUG || Flushed=true written=312 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,227 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,227 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,227 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,227 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@d5c972e] ([p=142,l=142,c=8192,r=0],[p=170,l=170,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,227 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,227 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,227 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,227 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,227 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=4/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,227 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,227 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,227 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,227 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,227 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,227 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,227 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=2,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,227 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,227 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,227 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,227 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,227 DEBUG || interested FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,227 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING 
[org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,227 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,227 DEBUG || complete(org.eclipse.jetty.util.Callback$3@509d41a2) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,227 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,227 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,227 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,227 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:57:17.227911+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,227 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,227 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,228 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,228 DEBUG || onCompleted for /connectors/vk_nau27_src/status written=170 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,228 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,228 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c 
woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:57:17.228118+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,228 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,228 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,228 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,228 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,228 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:57:17.22803+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,228 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:57:17.22816+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,228 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,228 DEBUG || fillable 
FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,228 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,228 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,228 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,228 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || filled 150 HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\n\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,228 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,228 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} filled 
150 HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\n\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,228 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,228 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=1,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\n\x00...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,228 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\n\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,228 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,228 DEBUG || HEADER:Host: 
naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,228 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,228 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,228 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,228 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,229 DEBUG || task CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,229 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,229 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/config on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} GET //naument:8083/connectors/vk_nau27_sink/config HTTP/1.1 Accept-Encoding: 
gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,229 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:57:17.22909+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,229 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,229 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,229 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,229 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,229 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,229 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:57:17.229193+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,229 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/config HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,229 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:57:17.229168+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING 
[org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,229 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,229 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=21]@2023-08-25T13:57:17.229243+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,229 DEBUG || fillable FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,229 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,229 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,229 DEBUG || REQUEST GET /connectors/vk_nau27_sink/config on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,229 DEBUG || scope null||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,229 DEBUG || context=||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,229 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,229 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,229 DEBUG || servlet ||/connectors/vk_nau27_sink/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,229 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,230 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,230 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,230 DEBUG || 
[Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:17,229 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,230 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,229 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,230 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:17,230 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,229 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,230 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,230 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,230 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,230 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,230 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,230 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,230 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE 
awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,230 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,230 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,230 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,230 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,230 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,230 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation 
at: 1692961731909 (now: 1692961037230 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,230 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,230 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 694679ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,230 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,230 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,230 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,230 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/config on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} GET //naument:8083/connectors/vk_nau27_src/config HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,230 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,230 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,230 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,231 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,231 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,231 DEBUG || handle //naument:8083/connectors/vk_nau27_src/config HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,231 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,231 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,231 DEBUG || write(array HeapByteBuffer@3b0f54e[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,231 DEBUG || ReservedThread@2a2521{RUNNING,thread=Thread[qtp974320615-175,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,231 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,231 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,231 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,231 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,231 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,231 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,231 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=808 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,231 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,231 DEBUG || sendResponse info=null content=HeapByteBuffer@1aa760e1[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,231 DEBUG || COMMIT for /connectors/vk_nau27_sink/config on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,231 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@294c81bf] (null,[p=0,l=808,c=8192,r=808],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,231 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=808} last=true content=HeapByteBuffer@1aa760e1[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,231 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,231 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,231 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@294c81bf] ([p=0,l=142,c=8192,r=142],[p=0,l=808,c=8192,r=808],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,231 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@1aa760e1[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,231 DEBUG || update 
WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,231 DEBUG || flushed 950 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,231 DEBUG || Flushed=true written=950 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,231 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,231 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@294c81bf] ([p=142,l=142,c=8192,r=0],[p=808,l=808,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,231 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,231 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,231 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=2,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,231 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,231 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,231 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,231 DEBUG || complete(org.eclipse.jetty.util.Callback$3@6f35b676) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,231 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,231 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,231 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,231 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED 
is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,231 DEBUG || onCompleted for /connectors/vk_nau27_sink/config written=808 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,231 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_sink/config HTTP/1.1" 200 808 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,231 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,231 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,231 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,232 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,232 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,232 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,232 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,232 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,232 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,232 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,232 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE 
rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,232 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=2,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,232 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,232 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,232 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,232 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,232 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,232 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,232 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,232 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,232 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,232 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 
of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,232 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,232 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,232 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,232 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,232 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,232 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 
2023-08-25 13:57:17,232 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,231 DEBUG || REQUEST GET /connectors/vk_nau27_src/config on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,232 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,231 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,232 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || scope null||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,232 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,232 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || context=||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,232 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,232 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,232 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,232 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,232 DEBUG || servlet ||/connectors/vk_nau27_src/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,232 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,232 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,233 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=33]@2023-08-25T13:57:17.232999+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,233 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=33]@2023-08-25T13:57:17.233045+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,233 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,233 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,233 DEBUG || filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,233 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,233 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,233 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,233 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,233 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || 
HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:17,233 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,233 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=34]@2023-08-25T13:57:17.233431+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,233 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:17,233 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=34]@2023-08-25T13:57:17.233504+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,233 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,233 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,233 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,233 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,233 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,233 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,233 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,233 DEBUG || Key interests 
updated 1 -> 0 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,233 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,233 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,233 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,233 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961037233 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,233 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,233 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 694676ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,233 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,233 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,233 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,233 DEBUG || scope null||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,233 DEBUG || context=||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,233 DEBUG || 
Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,233 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,233 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,233 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,234 DEBUG || write(array HeapByteBuffer@51cb0a5f[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,234 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1431 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,234 DEBUG || sendResponse info=null content=HeapByteBuffer@63d0ac73[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,234 DEBUG || COMMIT for /connectors/vk_nau27_src/config on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,234 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@652a77d6] (null,[p=0,l=1431,c=8192,r=1431],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,234 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1431} last=true content=HeapByteBuffer@63d0ac73[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,234 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,234 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,234 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@652a77d6] ([p=0,l=143,c=8192,r=143],[p=0,l=1431,c=8192,r=1431],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,234 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@63d0ac73[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,234 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING 
[org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,234 DEBUG || flushed 1574 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,234 DEBUG || Flushed=true written=1574 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,234 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,234 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@652a77d6] ([p=143,l=143,c=8192,r=0],[p=1431,l=1431,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,234 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,234 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,234 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,234 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=3,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,234 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:17,234 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,234 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,234 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,234 DEBUG || complete(org.eclipse.jetty.util.Callback$3@3b6719f9) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,234 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,234 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,234 DEBUG || 
nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,234 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,234 DEBUG || onCompleted for /connectors/vk_nau27_src/config written=1431 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,234 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_src/config HTTP/1.1" 200 1431 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,234 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,234 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,234 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,234 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,234 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,234 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,234 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,234 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,234 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,235 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,235 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=5} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,235 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=5} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,235 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,235 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,235 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,235 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,235 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,235 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,235 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,235 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,235 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,235 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,235 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,235 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,235 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,235 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,235 DEBUG || interested FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,235 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,235 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:17,235 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,235 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,235 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,235 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,235 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=30]@2023-08-25T13:57:17.235697+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,235 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,235 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,235 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,235 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=30]@2023-08-25T13:57:17.23576+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,235 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,235 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,235 DEBUG || fillable 
FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,235 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,235 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,235 DEBUG || filled 148 HeapByteBuffer@7b3e78db[p=0,l=148,c=8192,r=148]={<<>>\r\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,235 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961037235 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,235 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,235 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 694674ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,235 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,235 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,236 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,236 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,236 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,236 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,236 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,235 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=30]@2023-08-25T13:57:17.235813+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,236 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,236 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,236 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=31]@2023-08-25T13:57:17.236288+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,235 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} filled 148 HeapByteBuffer@7b3e78db[p=0,l=148,c=8192,r=148]={<<>>\r\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,236 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,236 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,236 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=148,c=8192,r=148]={<<>>\r\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,236 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=148,c=8192,r=148]={<<>>\r\nson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || SPACE2 --> 
REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=2,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,236 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,236 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,236 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,236 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,236 DEBUG || write(array HeapByteBuffer@6bbdb104[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,236 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,236 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=929 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,236 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,236 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,236 DEBUG || sendResponse info=null content=HeapByteBuffer@3ae9b700[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,236 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,236 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,236 DEBUG || scope null||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,236 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,236 DEBUG || context=||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,236 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,236 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@764a162a] (null,[p=0,l=929,c=8192,r=929],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,236 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,236 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=929} last=true content=HeapByteBuffer@3ae9b700[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 
13:57:17,236 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,236 DEBUG || servlet ||/connectors/vk_nau27_src/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,236 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,236 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,236 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@764a162a] ([p=0,l=142,c=8192,r=142],[p=0,l=929,c=8192,r=929],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,237 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\n\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@3ae9b700[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,237 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,237 DEBUG || flushed 1071 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,237 DEBUG || Flushed=true written=1071 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,237 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,237 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@764a162a] ([p=142,l=142,c=8192,r=0],[p=929,l=929,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,237 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,237 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,237 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:17,237 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:17,237 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,237 TRACE || [Worker 
clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,237 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,237 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=3,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,237 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,237 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,237 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,237 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,237 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961037237 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,237 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,237 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 694672ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:17,237 DEBUG || complete(org.eclipse.jetty.util.Callback$3@2fa86140) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,237 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,237 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,237 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,237 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,237 DEBUG || 
onCompleted for /connectors/vk_nau27_sink/tasks written=929 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,237 DEBUG || write(array HeapByteBuffer@6c2bf641[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,237 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,238 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_sink/tasks HTTP/1.1" 200 929 "-" "ReactorNetty/1.1.6" 5 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,238 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1551 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,238 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,238 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,238 DEBUG || sendResponse info=null content=HeapByteBuffer@5eb0e0ab[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,238 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,238 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,238 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=5} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,238 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@763e81d0] (null,[p=0,l=1551,c=8192,r=1551],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,238 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1551} last=true content=HeapByteBuffer@5eb0e0ab[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,238 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 
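
The DEBUG/TRACE records around this point show a routine monitoring sweep against the worker's REST API: GET /connectors/<name>/tasks is answered with the task-config array (929 bytes for vk_nau27_sink, 1551 bytes for vk_nau27_src in the access-log lines), and the records that follow show GET /connectors/<name>/tasks/0/status answered with {"id":0,"state":"RUNNING",...}. Below is a minimal client-side sketch of the same two calls, assuming Python with the requests library; the base URL and connector names are taken from the request lines in the log, while the helper names, timeout and error handling are illustrative only, not part of the logged traffic.

import requests

BASE = "http://naument:8083"  # worker address from the Host: header in the requests above (assumption: reachable from the client)

def connector_tasks(name: str) -> list:
    # GET /connectors/{name}/tasks -> [{"id": {"connector": name, "task": 0}, "config": {...}}, ...]
    r = requests.get(f"{BASE}/connectors/{name}/tasks",
                     headers={"Accept": "application/json"}, timeout=10)
    r.raise_for_status()
    return r.json()

def task_status(name: str, task_id: int) -> dict:
    # GET /connectors/{name}/tasks/{id}/status -> {"id": 0, "state": "RUNNING", "worker_id": "..."}
    r = requests.get(f"{BASE}/connectors/{name}/tasks/{task_id}/status",
                     headers={"Accept": "application/json"}, timeout=10)
    r.raise_for_status()
    return r.json()

if __name__ == "__main__":
    # Connector names as seen in the request URIs above; any other connector is queried the same way.
    for connector in ("vk_nau27_src", "vk_nau27_sink"):
        for task in connector_tasks(connector):
            status = task_status(connector, task["id"]["task"])
            print(connector, task["id"]["task"], status["state"], status.get("worker_id"))

The logged responses (HTTP 200, Content-Type: application/json, payload fragments [{"id":{"connector":... and {"id":0,"state":"RUNNING"...) match the shapes these two Kafka Connect REST endpoints return; nothing in the sketch is specific to this deployment beyond the host and connector names.
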
2023-08-25 13:57:17,238 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=5} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,238 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,238 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,238 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,238 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,238 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@763e81d0] ([p=0,l=143,c=8192,r=143],[p=0,l=1551,c=8192,r=1551],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,238 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,238 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,238 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>n\r\n\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@5eb0e0ab[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,238 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,238 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,238 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,238 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,238 DEBUG || flushed 1694 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,238 DEBUG || Flushed=true written=1694 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,238 DEBUG || task CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,238 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=5} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,238 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,238 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:57:17.238607+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,238 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=5} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,238 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@763e81d0] ([p=143,l=143,c=8192,r=0],[p=1551,l=1551,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,238 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} 
task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:57:17.23874+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,238 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,238 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:57:17.238701+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,238 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,238 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,238 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=22]@2023-08-25T13:57:17.238795+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,238 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,238 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=3,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,238 DEBUG || fillable FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,238 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} onFillable enter 
HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,239 DEBUG || filled 158 HeapByteBuffer@360c19be[p=0,l=158,c=8192,r=158]={<<>>gth: 867\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,238 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,239 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,239 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} filled 158 HeapByteBuffer@360c19be[p=0,l=158,c=8192,r=158]={<<>>gth: 867\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,239 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,239 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,238 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,238 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,239 DEBUG || filled 157 HeapByteBuffer@1d18b05f[p=0,l=157,c=8192,r=157]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,239 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} filled 157 HeapByteBuffer@1d18b05f[p=0,l=157,c=8192,r=157]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,239 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} parse 
HeapByteBuffer@1d18b05f[p=0,l=157,c=8192,r=157]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,239 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=157,c=8192,r=157]={<<>>\n\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,239 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,239 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,239 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,239 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks/0/status 
HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,239 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,239 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,239 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,239 DEBUG || scope null||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,239 DEBUG || context=||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,239 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,239 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,239 DEBUG || servlet ||/connectors/vk_nau27_src/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,239 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,240 DEBUG || write(array HeapByteBuffer@5e3d23a4[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,240 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,240 DEBUG || sendResponse info=null content=HeapByteBuffer@7860950f[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,240 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,240 DEBUG || generate: NEED_HEADER for 
SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@142e4922] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,240 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@7860950f[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,240 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,239 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,239 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=158,c=8192,r=158]={<<>>gth: 867\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,240 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,240 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,240 DEBUG || complete(org.eclipse.jetty.util.Callback$3@64adb05c) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,240 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,240 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,240 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,240 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,240 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks written=1551 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,241 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_src/tasks HTTP/1.1" 200 1551 "-" "ReactorNetty/1.1.6" 5 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,241 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,241 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET 
/conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,241 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,241 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,241 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,241 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,241 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,241 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,241 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=3,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,241 
DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,241 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,241 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,241 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,241 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,241 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,241 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,241 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,241 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,240 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=158,c=8192,r=158]={<<>>gth: 867\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,240 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@142e4922] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,241 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,241 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>\r\nn\r\n\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@7860950f[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,241 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,241 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || update 
WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,242 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,242 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,242 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,241 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,242 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,242 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,242 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,242 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,242 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,242 DEBUG || flushed 198 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,242 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,242 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,242 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks/0/status HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,242 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,242 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,242 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,242 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,242 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,242 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,242 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@142e4922] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,242 DEBUG || scope null||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,242 DEBUG || context=||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,242 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,242 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,242 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,242 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,242 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,242 DEBUG || Key interests updated 0 -> 1 on 
SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,242 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,242 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,242 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=4,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,242 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,242 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,242 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,242 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,242 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,242 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,242 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,242 DEBUG || complete(org.eclipse.jetty.util.Callback$3@63438157) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,242 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,242 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,242 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,242 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,242 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,243 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,243 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,243 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,243 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,243 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,243 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,243 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,243 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,243 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,243 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,243 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,243 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,243 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,243 DEBUG || write(array HeapByteBuffer@e20ed5b[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,243 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,243 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,243 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,243 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,243 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,243 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,243 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,243 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,243 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,243 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,243 DEBUG || 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,243 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,243 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,243 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,244 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,244 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,244 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,244 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,244 DEBUG || Wakeup on submit 
ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,244 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,244 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,244 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,243 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,244 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,244 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,244 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,244 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,244 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,243 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,244 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,244 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,244 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,244 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting 
ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,244 DEBUG || sendResponse info=null content=HeapByteBuffer@2d9e7a3a[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,244 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=31]@2023-08-25T13:57:17.244525+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,244 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,244 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=31]@2023-08-25T13:57:17.244591+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,244 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@73e82e71] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,244 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=31]@2023-08-25T13:57:17.244626+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,244 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@2d9e7a3a[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,244 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=32]@2023-08-25T13:57:17.24473+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,244 DEBUG || updateable 0 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,244 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,244 DEBUG || fillable FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,244 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,244 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,244 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,244 DEBUG || filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,244 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@73e82e71] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,244 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,244 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>\r\nn\r\n\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@2d9e7a3a[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,244 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,244 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 
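The access-log and COMMIT/sendResponse entries in this stretch show the worker at 10.0.2.144:8083 answering the standard Kafka Connect REST monitoring calls: GET /connectors/<name>/tasks/0/status returns a small JSON body such as {"id":0,"state":"RUNNING",...,"worker_id":"172.18.0.10:8083"} (Content-Length: 57 above), and GET /connectors/<name>/topics returns the active topics per connector. The sketch below is a hypothetical illustration of that same polling cycle, assuming the worker is reachable at http://naument:8083 exactly as in the logged requests; the real caller in this trace is a ReactorNetty/1.1.6 client, not this script, and the connector names are taken from the request URIs above.

    # Hypothetical re-creation of the polling seen in this log (not the actual client).
    # Assumes the Connect worker REST listener is http://naument:8083 as in the requests above.
    import requests

    BASE = "http://naument:8083"
    CONNECTORS = ["vk_nau27_src", "vk_nau27_sink"]

    for name in CONNECTORS:
        # GET /connectors/{name}/tasks/0/status -> {"id":0,"state":"RUNNING",...,"worker_id":"..."}
        status = requests.get(f"{BASE}/connectors/{name}/tasks/0/status", timeout=5).json()
        # GET /connectors/{name}/topics -> {"<name>":{"topics":[...]}}
        topics = requests.get(f"{BASE}/connectors/{name}/topics", timeout=5).json()
        print(name, status["state"], status["worker_id"], topics[name]["topics"])

Each request issued by this sketch corresponds to one of the 200 responses visible in the surrounding DEBUG output (task status for vk_nau27_src and vk_nau27_sink, then the /topics calls that follow).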
2023-08-25 13:57:17,244 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,245 DEBUG || flushed 198 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=6/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,245 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,245 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,245 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@73e82e71] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,245 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,245 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,245 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=4,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,245 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,245 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,245 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,245 DEBUG || complete(org.eclipse.jetty.util.Callback$3@79cbf6e1) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,245 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,245 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,245 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,245 DEBUG || action TERMINATED 
HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,245 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,245 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_sink/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,245 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,245 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,245 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,245 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,245 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,245 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,245 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,245 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,245 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,245 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,245 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,245 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,245 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,245 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,245 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,245 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,245 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,245 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,245 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,245 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,245 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,245 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,245 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,245 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,246 DEBUG || interested FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,246 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,246 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false 
se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,246 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,246 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,244 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=3,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,246 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,246 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,245 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || selected 1 
channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,246 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,246 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,246 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/topics on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} GET //naument:8083/connectors/vk_nau27_src/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,246 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=34]@2023-08-25T13:57:17.246593+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,246 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,246 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,246 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=34]@2023-08-25T13:57:17.246642+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,246 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=34]@2023-08-25T13:57:17.246695+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,246 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,246 DEBUG || handle //naument:8083/connectors/vk_nau27_src/topics HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,246 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,246 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=35]@2023-08-25T13:57:17.246789+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:17,246 DEBUG || 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,246 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,246 DEBUG || filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,246 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,246 DEBUG || REQUEST GET /connectors/vk_nau27_src/topics on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,246 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,246 DEBUG || scope null||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,246 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,247 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || Key interests updated 1 -> 0 on 
SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,247 DEBUG || context=||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,247 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,247 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,247 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,247 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,247 DEBUG || servlet ||/connectors/vk_nau27_src/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,247 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,247 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/topics on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} GET //naument:8083/connectors/vk_nau27_sink/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,247 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} parsed 
true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,247 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,247 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/topics HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,247 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,247 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,247 DEBUG || REQUEST GET /connectors/vk_nau27_sink/topics on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,247 DEBUG || scope null||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,247 DEBUG || context=||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:17,247 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,247 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,247 DEBUG || servlet ||/connectors/vk_nau27_sink/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,247 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:17,248 DEBUG || write(array HeapByteBuffer@9d7d482[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,248 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=85 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,248 DEBUG || sendResponse info=null 
content=HeapByteBuffer@6d5a8b4d[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,248 DEBUG || COMMIT for /connectors/vk_nau27_src/topics on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,248 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@199e61] (null,[p=0,l=85,c=8192,r=85],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,248 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=85} last=true content=HeapByteBuffer@6d5a8b4d[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,248 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,248 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,248 DEBUG || write(array HeapByteBuffer@7759bdbb[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,248 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@199e61] ([p=0,l=141,c=8192,r=141],[p=0,l=85,c=8192,r=85],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,248 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=75 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,248 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>\r\nn\r\n\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@6d5a8b4d[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,248 DEBUG || sendResponse info=null content=HeapByteBuffer@6ae34dbe[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,248 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,248 DEBUG || COMMIT for /connectors/vk_nau27_sink/topics on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 
2023-08-25 13:57:17,248 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5bf51736] (null,[p=0,l=75,c=8192,r=75],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,248 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=75} last=true content=HeapByteBuffer@6ae34dbe[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,248 DEBUG || Date: Fri, 25 Aug 2023 10:57:17 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,248 DEBUG || flushed 226 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,248 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:17,248 DEBUG || Flushed=true written=226 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,248 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5bf51736] ([p=0,l=141,c=8192,r=141],[p=0,l=75,c=8192,r=75],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,248 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,248 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>json\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@6ae34dbe[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,248 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,248 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@199e61] ([p=141,l=141,c=8192,r=0],[p=85,l=85,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,248 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,248 DEBUG || flushed 216 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,248 DEBUG || Flushed=true written=216 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 
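Annotation (not part of the captured log): the DEBUG records above trace Jetty committing and flushing the two 200 responses for GET /connectors/vk_nau27_src/topics and GET /connectors/vk_nau27_sink/topics, with Content-Length 85 and 75 and JSON bodies of the form {"vk_nau27_src":{"topics":[...]}}. Below is a minimal sketch of issuing the same active-topics query against the Connect REST endpoint with java.net.http; the host and port (naument:8083), the connector names and the Accept header are taken from the log, everything else is illustrative.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Sketch only: reproduces the active-topics query seen in the log above.
public class ConnectorTopicsProbe {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        for (String connector : new String[] {"vk_nau27_src", "vk_nau27_sink"}) {
            HttpRequest request = HttpRequest.newBuilder(
                    URI.create("http://naument:8083/connectors/" + connector + "/topics"))
                .header("Accept", "application/json")   // same header the ReactorNetty client sends in the log
                .GET()
                .build();
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            // Expected shape, as in the captured payloads: {"vk_nau27_src":{"topics":[ ... ]}}
            System.out.println(response.statusCode() + " " + response.body());
        }
    }
}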
2023-08-25 13:57:17,248 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:17,248 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,248 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5bf51736] ([p=141,l=141,c=8192,r=0],[p=75,l=75,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,249 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=4,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,249 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,249 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:17,249 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,249 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:17,249 DEBUG || complete(org.eclipse.jetty.util.Callback$3@127808a) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,249 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,249 DEBUG || 
onCompleted for /connectors/vk_nau27_src/topics written=85 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,249 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,249 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,249 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,249 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,249 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,249 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,249 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,249 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,249 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,249 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,249 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,249 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} parsed false HttpParser{s=END,0 of -1} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,249 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=4,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,249 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,249 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,249 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,249 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,249 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,249 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,249 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,249 DEBUG || complete(org.eclipse.jetty.util.Callback$3@cc5ab38) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:17,249 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 
...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,249 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,249 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,249 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,249 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,249 DEBUG || onCompleted for /connectors/vk_nau27_sink/topics written=75 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,249 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,249 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:17 +0000] "GET /connectors/vk_nau27_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:17,249 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,249 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,250 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,250 DEBUG || interested 
FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,250 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,250 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,250 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,250 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,250 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,250 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,250 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,250 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || 
HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,250 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,250 DEBUG || ReservedThread@2a2521{RUNNING,thread=Thread[qtp974320615-175,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,250 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,250 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:17,250 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,250 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,250 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} 
[org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,250 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:17,250 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,250 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,250 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:17,250 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,250 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,250 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,250 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,250 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:17,250 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,250 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,250 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:17,250 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:17,250 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,250 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:17,250 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,250 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,250 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting 
ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:17,251 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:17,251 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,251 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:17,256 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=224, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,256 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,256 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,256 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,256 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=176) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,256 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,256 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,256 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2437 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,256 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=225, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=176, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,256 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,256 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2437 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,264 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2176, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,264 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,264 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=721) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,264 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372112 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,264 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2179, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=721, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,264 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
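Annotation (not part of the captured log): the FETCH traffic above shows the sink task's embedded consumer (clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink) keeping incremental fetch session 1637453888 open against broker3:29092 for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0; the session bookkeeping (epoch, implied partitions, "skipping fetch" while a request is in flight) happens inside the Kafka client. A minimal stand-alone poll loop that would generate equivalent READ_UNCOMMITTED fetches is sketched below; broker, group, client id, topic and isolation level come from the log, while the deserializers and the rest of the configuration are assumptions, since the real consumer is created and configured by the Connect runtime.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Sketch only: an equivalent consumer loop for the partition polled in the log above.
public class SinkTopicPollSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker3:29092");              // node 3 from the log
        props.put("group.id", "connect-vk_nau27_sink");               // groupId from the log
        props.put("client.id", "connector-consumer-vk_nau27_sink-0"); // clientId from the log
        props.put("isolation.level", "read_uncommitted");             // matches the READ_UNCOMMITTED fetches above
        // Deserializers are assumptions; the real record handling is done by the sink connector's converters.
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"));
            while (true) {
                // poll() drives the same kind of incremental FETCH requests logged by NetworkClient above
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    System.out.printf("offset=%d valueBytes=%d%n", record.offset(),
                            record.value() == null ? 0 : record.value().length);
                }
            }
        }
    }
}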
2023-08-25 13:57:17,264 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,264 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372112 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,288 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,288 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
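
The repeated DEBUG pair above comes from Debezium's ChangeEventQueue: the connector task keeps checking an in-memory queue for change events and, while nothing is there (or the batch is not yet full), naps briefly before checking again. A minimal sketch of that polling pattern in Java; the class name, capacity, batch size and sleep interval below are assumptions for illustration, not Debezium's actual implementation:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    // Drain up to maxBatchSize queued events; nap while nothing is available,
    // mirroring the "checking for more records" / "sleeping a bit" messages.
    public final class BatchPollSketch<T> {
        private final BlockingQueue<T> queue = new ArrayBlockingQueue<>(8192); // assumed capacity
        private final int maxBatchSize = 2048;                                 // assumed batch size
        private final long pollIntervalMs = 500;                               // assumed poll interval

        public List<T> poll() throws InterruptedException {
            List<T> batch = new ArrayList<>(maxBatchSize);
            long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(pollIntervalMs);
            while (batch.size() < maxBatchSize && System.nanoTime() < deadline) {
                // "checking for more records..."
                queue.drainTo(batch, maxBatchSize - batch.size());
                if (batch.size() < maxBatchSize) {
                    // "no records available or batch size not reached yet, sleeping a bit..."
                    TimeUnit.MILLISECONDS.sleep(10); // assumed short nap before re-checking
                }
            }
            return batch;
        }

        public void enqueue(T event) throws InterruptedException {
            queue.put(event); // producer side: the streaming thread would add change events here
        }
    }

Here the queue stays empty, so the two messages simply alternate until the streaming thread enqueues a record or the poll interval expires.
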
2023-08-25 13:57:17,289 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,289 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:17,289 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:17,289 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,289 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2177, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=723) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
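
The WorkerSourceTask TRACE entries above ("About to send 0 records to Kafka", "Nothing to send to Kafka. Polling source for additional records") are the Kafka Connect side of the same idle cycle: the framework polls the source task and, when the poll returns nothing, skips the produce step and polls again. A rough sketch of that control flow; the interface and method names are hypothetical, not the actual AbstractWorkerSourceTask code:

    import java.util.List;
    import java.util.function.Consumer;

    // Illustrative dispatch loop mirroring the WorkerSourceTask TRACE messages:
    // poll the source task, skip the send step when the poll yields nothing.
    final class SourceLoopSketch {
        interface SourceTaskLike<R> {
            List<R> poll() throws InterruptedException; // may return null or an empty list
        }

        static <R> void run(SourceTaskLike<R> task, Consumer<List<R>> sendToKafka)
                throws InterruptedException {
            while (!Thread.currentThread().isInterrupted()) {
                List<R> records = task.poll();
                int count = (records == null) ? 0 : records.size();
                // "About to send <count> records to Kafka"
                if (count == 0) {
                    // "Nothing to send to Kafka. Polling source for additional records"
                    continue;
                }
                sendToKafka.accept(records);
            }
        }
    }

Only when poll() returns a non-empty batch are records handed to the producer, which is why the record count in the TRACE message stays at 0 here.
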
2023-08-25 13:57:17,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372020 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,356 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2180, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=723, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,356 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,357 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372020 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,357 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=731, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,357 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler]
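
The consumer entries in this stretch are Kafka's incremental fetch sessions at work: per broker, the client remembers a sessionId and epoch, sends a FETCH with topics=[] that merely implies the partitions already registered in the session, and skips any partition whose leader still has an in-flight request ("previous request ... has not been processed"). A simplified model of that per-node bookkeeping; the class and method names are assumptions, not the real FetchSessionHandler/AbstractFetch API:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Simplified per-broker fetch-session bookkeeping: a session id and epoch per
    // node, plus the set of nodes that still have an in-flight fetch and must be
    // skipped until their response arrives.
    final class FetchSessionSketch {
        private final Map<Integer, Long> sessionIdByNode = new HashMap<>();
        private final Map<Integer, Integer> epochByNode = new HashMap<>();
        private final Set<Integer> nodesWithPendingFetch = new HashSet<>();

        boolean canFetchFrom(int nodeId) {
            // "Skipping fetch for partition ... because previous request to <node> has not been processed"
            return !nodesWithPendingFetch.contains(nodeId);
        }

        int buildIncrementalFetch(int nodeId, long sessionId) {
            // "Built incremental fetch (sessionId=..., epoch=N) for node ..." - the epoch advances per request
            sessionIdByNode.put(nodeId, sessionId);
            return epochByNode.merge(nodeId, 1, Integer::sum);
        }

        void onRequestSent(int nodeId) {
            // "Adding pending request for node ..."
            nodesWithPendingFetch.add(nodeId);
        }

        void onResponseReceived(int nodeId) {
            // "Removing pending request for node ..." - the node becomes fetchable again
            nodesWithPendingFetch.remove(nodeId);
        }
    }

The very large, slowly decreasing "Polling for fetches with timeout 21473..." values appear to be countdowns from a timeout near Integer.MAX_VALUE milliseconds, i.e. these internal consumers poll with an effectively unbounded deadline.
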
2023-08-25 13:57:17,357 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,358 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,358 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=725) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,358 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,358 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,358 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147370996 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,358 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=732, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=725, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,358 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,358 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147370995 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,372 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2178, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,373 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,373 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null)
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=724) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,373 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372003 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,373 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2181, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=724, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,373 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147372003 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2169, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node 
broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=720) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,427 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457829 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2172, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=720, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2170, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,427 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position 
FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 
(id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=720) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457829 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,428 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2173, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=720, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,428 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457828 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2171, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=721) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,657 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457599 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,658 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2174, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=721, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,658 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457598 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,757 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=225, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,757 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,757 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,757 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,757 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=177) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,757 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,757 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,757 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1936 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,757 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=226, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=177, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,758 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,758 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1935 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,765 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2179, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,765 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,765 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,765 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: 
null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,765 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,765 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,765 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,765 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,765 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=722) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,765 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,765 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,765 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371611 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,765 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2182, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=722, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,765 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,766 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,766 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,766 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,766 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,766 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371611 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] [... the preceding pair of oracle-connector-task messages, "checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...", repeats verbatim many more times at 13:57:17,789; duplicate entries elided ...] 2023-08-25 13:57:17,789 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,790 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:17,790 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:17,790 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,790 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:17,856 TRACE || [Producer clientId=naument--statuses] All queue lengths are the same, not using adaptive for topic naument-status [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:57:17,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2180, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 DEBUG || [Consumer clientId=naument--statuses, 
groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=724) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371519 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,857 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2183, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=724, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,857 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371519 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,858 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=732, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,858 DEBUG || [Consumer clientId=naument--configs, 
groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,858 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,858 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,858 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=726) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,858 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,858 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,858 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147370495 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,858 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=733, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=726, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,858 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,858 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147370495 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,873 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2181, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,873 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,873 DEBUG || [Consumer clientId=naument--statuses, 
groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=725) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,873 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371503 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,873 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2184, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=725, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,873 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371503 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,928 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2172, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,928 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), 
implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,928 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: 
null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,928 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node 
broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=721) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457328 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2175, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=721, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457327 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, 
clientId=naument--offsets, correlationId=2173, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, 
currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=721) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457327 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,929 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2176, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=721, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,929 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:17,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457327 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:17,960 TRACE || [Producer clientId=naument--configs] The number of partitions is too small: available=1, all=1, not using adaptive for topic naument-config [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:57:18,158 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2174, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,158 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,158 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,158 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition 
naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=722) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,159 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457098 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,159 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2177, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=722, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,159 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147457097 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,258 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=226, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,258 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,258 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,258 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,258 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=178) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,258 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,258 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,258 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1435 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,258 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=227, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=178, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,259 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,259 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1435 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,266 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2182, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,266 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,266 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,266 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: 
null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,266 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,266 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,266 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,266 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,266 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=723) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,266 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,266 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,266 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371110 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,266 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2185, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=723, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,267 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,267 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,267 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,267 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,267 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,267 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371109 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,290 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,290 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] [...] 2023-08-25 13:57:18,291 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
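The DEBUG pair above ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") is emitted over and over, within the same millisecond, by the Debezium source task while its internal change-event queue is empty and no batch is ready to hand to Kafka Connect. Below is a minimal, illustrative sketch in Java of that kind of bounded batching queue; the class name, field names and timings (maxBatchSize, pollIntervalMs) are assumptions chosen for illustration, not Debezium's actual ChangeEventQueue implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Minimal sketch of a batching poll loop of the kind whose DEBUG output appears above.
// Names and timings are illustrative assumptions, not Debezium's actual code.
public class BatchingQueueSketch<T> {
    private final BlockingQueue<T> queue = new ArrayBlockingQueue<>(8192);
    private final int maxBatchSize = 2048;    // assumed, in the spirit of Debezium's max.batch.size
    private final long pollIntervalMs = 500L; // assumed, in the spirit of Debezium's poll.interval.ms

    public void enqueue(T record) throws InterruptedException {
        queue.put(record); // blocks when the queue is full, back-pressuring the source
    }

    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.currentTimeMillis() + pollIntervalMs;
        while (batch.size() < maxBatchSize && System.currentTimeMillis() < deadline) {
            // "checking for more records..."
            T record = queue.poll(50, TimeUnit.MILLISECONDS);
            if (record == null) {
                // "no records available or batch size not reached yet, sleeping a bit..."
                continue;
            }
            batch.add(record);
        }
        return batch; // may be empty when the source is idle
    }
}

When such a poll returns an empty batch, the Connect worker simply reports that there is nothing to send and polls the source again, which is the WorkerSourceTask behaviour recorded in the entries that follow.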
2023-08-25 13:57:18,291 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:18,291 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:18,291 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,291 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2183, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=725) for node 1.
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371018 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,358 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2186, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=725, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,358 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371018 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=733, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=727) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,359 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147369994 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=734, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=727, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,359 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,359 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147369994 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,374 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2184, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,374 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,374 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=726) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,374 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371002 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,374 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2187, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=726, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,374 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147371002 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2175, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node 
broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=722) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,429 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147456827 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2178, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=722, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2176, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,429 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position 
FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 
(id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=722) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147456827 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2179, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=722, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,430 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147456826 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,590 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:18,592 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:18,592 TRACE Oracle|vk_nau27|streaming Current time 1692961038592 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:18,592 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:18,593 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 3000. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:18,593 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599295 as end SCN. 
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:18,593 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:18,595 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599296' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:18,599 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:18,601 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599280, endScn=289931599295, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:18,601 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599281', endScn => '289931599295', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:18,610 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599280, 289931599295] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:18,659 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2177, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,659 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,659 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 DEBUG || 
[Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=723) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147456597 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,660 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2180, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=723, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,660 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147456596 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,686 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:18,686 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599285, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='04000e0036cf2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be03.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:18,686 TRACE Oracle|vk_nau27|streaming Transaction 04000e0036cf2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:18,686 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599288, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:15Z, transactionId='12001000a54a2a00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be0a.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:18,686 TRACE Oracle|vk_nau27|streaming Transaction 12001000a54a2a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:18,686 DEBUG Oracle|vk_nau27|streaming Counters{rows=5, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=2, rollbackCount=0, tableMetadataCount=0}. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:18,686 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 3686. Offset SCN: 289931599280, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599288, txIds=[12001000a54a2a00]}}], Active Transactions: 0, Sleep: 3000 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:18,686 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:18,689 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:18,690 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:18,693 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:18,695 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:18,716 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:18,716 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:18,716 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=136, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,716 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=136, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,716 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:18,759 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=227, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,759 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for 
session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,759 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,759 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,759 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=179) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,759 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,759 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,759 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 934 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,759 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=228, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=179, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,760 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,760 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 933 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2185, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 
2023-08-25 13:57:18,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,767 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,767 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,767 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=724) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,767 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370609 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2188, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=724, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370608 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,791 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,791 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,791 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,791 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,791 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,792 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,792 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:18,792 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:18,792 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,792 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2186, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping 
fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=726) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370517 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2189, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=726, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370517 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, 
clientId=naument--configs, correlationId=734, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=728) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147369494 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,859 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=735, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=728, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,859 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147369494 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,874 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2187, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,874 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] 
Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,874 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,874 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,874 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,874 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,874 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,874 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,874 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=727) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,874 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,874 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,874 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370502 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,874 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2190, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=727, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370501 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2178, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), 
implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: 
null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node 
broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=723) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147456326 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2181, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=723, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, 
correlationId=2179, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: 
null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,930 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=723) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:18,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147456326 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:18,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2182, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=723, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:18,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147456325 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,092 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1846/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,092 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1846/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,092 DEBUG || task CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1846/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING 
[org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,092 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=23]@2023-08-25T13:57:19.092607+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,092 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=23]@2023-08-25T13:57:19.09279+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1846/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,092 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:57:19.09291+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,092 DEBUG || fillable FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1846/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,093 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:57:19.093002+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,093 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,093 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,093 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1847/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,093 DEBUG || Key interests updated 1 -> 0 on 
SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1847/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,093 DEBUG || filled 129 HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,093 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} filled 129 HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,093 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,093 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,093 DEBUG || REQUEST for //naument:8083/connectors on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false 
i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} GET //naument:8083/connectors HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,093 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,093 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,093 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,093 DEBUG || handle //naument:8083/connectors HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,093 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,093 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,093 DEBUG || REQUEST GET /connectors on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,093 DEBUG || scope null||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,093 DEBUG || context=||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,093 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,093 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,094 DEBUG || servlet ||/connectors -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,094 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 
13:57:19,095 DEBUG || write(array HeapByteBuffer@6280bdb4[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,095 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=32 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,095 DEBUG || sendResponse info=null content=HeapByteBuffer@61abd8cc[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,095 DEBUG || COMMIT for /connectors on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,095 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@687caf64] (null,[p=0,l=32,c=8192,r=32],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,095 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=32} last=true content=HeapByteBuffer@61abd8cc[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,095 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,095 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,095 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@687caf64] ([p=0,l=141,c=8192,r=141],[p=0,l=32,c=8192,r=32],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,096 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>json\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@61abd8cc[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,096 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,096 DEBUG || flushed 173 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,096 DEBUG || Flushed=true written=173 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,096 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,096 DEBUG || generate: DONE for 
SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@687caf64] ([p=141,l=141,c=8192,r=0],[p=32,l=32,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,096 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,096 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,096 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,096 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,096 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,096 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,096 DEBUG || complete(org.eclipse.jetty.util.Callback$3@2b3cddb6) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,096 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,096 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,096 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,096 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,096 DEBUG || onCompleted for /connectors written=32 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,096 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors HTTP/1.1" 200 32 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,096 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,096 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,096 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,096 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,096 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,096 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,096 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,096 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,096 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,096 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,096 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,096 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,096 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,096 DEBUG || reset HttpParser{s=END,0 of -1} 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,096 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,096 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,096 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,096 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,097 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,097 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,097 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,097 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,097 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,097 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} 
[org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:19,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || interested FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,097 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1847/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1847/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,097 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false 
i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,097 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1847/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=32]@2023-08-25T13:57:19.097557+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,097 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,097 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,097 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,097 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=1846/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,097 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=32]@2023-08-25T13:57:19.097646+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1847/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,097 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=1846/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,097 DEBUG || fillable FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1847/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,097 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1847/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,097 DEBUG || filled 142 HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,098 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} filled 142 HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,098 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING 
os=OPEN is=IDLE awp=false se=false i=true al=0},r=4,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,098 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,098 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} GET //naument:8083/connectors/vk_nau27_src HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,098 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,098 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,098 DEBUG || handle //naument:8083/connectors/vk_nau27_src 
HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,098 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,098 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,098 DEBUG || REQUEST GET /connectors/vk_nau27_src on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,098 DEBUG || scope null||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,098 DEBUG || context=||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,098 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,098 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,098 DEBUG || servlet ||/connectors/vk_nau27_src -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,098 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,099 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,099 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:19,097 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=32]@2023-08-25T13:57:19.097753+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,099 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:19,099 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=33]@2023-08-25T13:57:19.099306+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,099 DEBUG || updateable 0 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,099 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,099 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,099 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,099 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,097 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=1847/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,099 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,099 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,099 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=35]@2023-08-25T13:57:19.099469+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,099 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=35]@2023-08-25T13:57:19.099521+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=1848/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,099 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=35]@2023-08-25T13:57:19.099553+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,099 
DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=1848/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,099 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=36]@2023-08-25T13:57:19.099625+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,099 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,099 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,099 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961039099 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,099 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1848/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,099 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1848/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,099 DEBUG || filled 143 HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,099 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 143 HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,099 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse 
HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,099 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,099 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,099 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,099 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} GET //naument:8083/connectors/vk_nau27_sink HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,100 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,100 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,099 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 692810ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,100 DEBUG || handle //naument:8083/connectors/vk_nau27_sink HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,100 DEBUG || write(array HeapByteBuffer@54d4ae98[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,100 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,100 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,100 DEBUG || REQUEST GET /connectors/vk_nau27_sink on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,100 DEBUG || scope null||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,100 DEBUG || context=||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,100 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,100 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,100 DEBUG || servlet ||/connectors/vk_nau27_sink -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,100 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,100 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,100 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:19,100 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1528 null 
[org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,100 DEBUG || sendResponse info=null content=HeapByteBuffer@28e091e0[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,100 DEBUG || COMMIT for /connectors/vk_nau27_src on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,100 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4ee1aa87] (null,[p=0,l=1528,c=8192,r=1528],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,101 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1528} last=true content=HeapByteBuffer@28e091e0[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,101 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,101 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,101 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4ee1aa87] ([p=0,l=143,c=8192,r=143],[p=0,l=1528,c=8192,r=1528],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,101 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>n\r\n\r\n\r\nso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@28e091e0[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,101 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,101 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:19,101 DEBUG || flushed 1671 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,101 DEBUG || Flushed=true written=1671 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,101 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,101 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity 
[org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,101 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,101 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@4ee1aa87] ([p=143,l=143,c=8192,r=0],[p=1528,l=1528,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,101 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,101 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,101 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961039101 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,101 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,101 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 692808ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,101 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,101 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,101 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,101 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,101 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,101 DEBUG || complete(org.eclipse.jetty.util.Callback$3@31d7d335) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,101 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,101 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED 
is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,101 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,101 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,101 DEBUG || onCompleted for /connectors/vk_nau27_src written=1528 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,101 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_src HTTP/1.1" 200 1528 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,101 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,101 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,101 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,101 DEBUG || write(array HeapByteBuffer@708886b6[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,101 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=905 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,102 DEBUG || sendResponse info=null content=HeapByteBuffer@2e996d37[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,102 DEBUG || COMMIT for /connectors/vk_nau27_sink on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,102 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1cb05533] (null,[p=0,l=905,c=8192,r=905],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,102 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=905} last=true 
content=HeapByteBuffer@2e996d37[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,102 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,102 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,102 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1cb05533] ([p=0,l=142,c=8192,r=142],[p=0,l=905,c=8192,r=905],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,102 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=142,c=8192,r=142]={<<>>\nson\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@2e996d37[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,102 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,102 DEBUG || flushed 1047 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,102 DEBUG || Flushed=true written=1047 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,102 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,102 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1cb05533] ([p=142,l=142,c=8192,r=0],[p=905,l=905,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,102 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,102 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,102 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,102 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,102 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,102 DEBUG || action COMPLETE 
HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,102 DEBUG || complete(org.eclipse.jetty.util.Callback$3@3aeac4ba) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,102 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,102 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,102 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,102 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,102 DEBUG || onCompleted for /connectors/vk_nau27_sink written=905 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,102 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_sink HTTP/1.1" 200 905 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,102 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,102 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,102 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,102 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,102 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,102 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,102 DEBUG || 
onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,102 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,102 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,102 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,102 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,102 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,102 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,102 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,102 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,103 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,103 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,103 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,103 DEBUG || 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,103 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,103 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,103 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:19,103 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,103 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,103 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,103 DEBUG || Wakeup on submit 
ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,103 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,103 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,103 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,101 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,103 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,103 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,103 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,103 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,102 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 
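The trace above covers one round of the monitoring client (User-Agent ReactorNetty/1.1.6) polling the worker's REST API: Jetty parses GET /connectors/vk_nau27_src and GET /connectors/vk_nau27_sink, the DistributedHerder is woken to serve each connector-info request, and both are answered 200 with the connector JSON (Content-Length 1528 and 905). The sketch below is a minimal, illustrative equivalent of that polling loop, assuming only the standard Kafka Connect REST endpoints already visible in this log and that the worker is reachable at http://naument:8083 (the Host header in the requests); the script and its helper name are not part of the source system.

    # Hypothetical polling sketch; it reproduces the GET requests visible in the trace above.
    # Assumption: the worker REST listener is http://naument:8083, taken from the log's Host header.
    import json
    import urllib.request

    BASE = "http://naument:8083"

    def get_json(path):
        # GET a Connect REST resource with the same Accept header the client above sends.
        req = urllib.request.Request(BASE + path, headers={"Accept": "application/json"})
        with urllib.request.urlopen(req, timeout=10) as resp:
            return json.loads(resp.read().decode("utf-8"))

    if __name__ == "__main__":
        for name in get_json("/connectors"):                 # e.g. vk_nau27_src, vk_nau27_sink
            info = get_json(f"/connectors/{name}")           # config + tasks ("type": "source" or "sink")
            status = get_json(f"/connectors/{name}/status")  # per-connector and per-task state
            print(name, info["type"], status["connector"]["state"])

Each such GET produces the pattern seen above: an HttpParser START -> ... -> CONTENT transition, a DistributedHerder "Submitting connector info request", and a closing RestServer access-log line with the response size.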
2023-08-25 13:57:19,103 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=5,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=5} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,103 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,103 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,103 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,103 DEBUG || filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,103 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=6/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,103 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,103 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,103 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN 
is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,103 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || task CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,103 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,103 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:57:19.103909+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,103 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,103 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,103 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,103 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,104 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,104 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=24]@2023-08-25T13:57:19.103971+03:00 m=EXECUTE_PRODUCE_CONSUME 
t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,104 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,104 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,104 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,103 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,104 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,104 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,104 DEBUG || fillable FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,104 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,104 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,104 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,104 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,104 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,104 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,104 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/status HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,104 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,104 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,104 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,104 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,104 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,104 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,104 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,104 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,104 DEBUG || REQUEST GET /connectors/vk_nau27_sink/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING 
rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,104 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:57:19.104348+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,104 DEBUG || scope null||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,104 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,104 DEBUG || context=||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,104 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,104 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:57:19.10444+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,104 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,104 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,104 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=HEADER,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=5,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,104 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,104 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,104 DEBUG || servlet ||/connectors/vk_nau27_sink/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,104 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,105 DEBUG || write(array HeapByteBuffer@40061bf9[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,105 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=169 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,105 DEBUG || sendResponse info=null 
content=HeapByteBuffer@6c45ed88[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,105 DEBUG || COMMIT for /connectors/vk_nau27_sink/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,105 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2fb3c715] (null,[p=0,l=169,c=8192,r=169],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,105 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=169} last=true content=HeapByteBuffer@6c45ed88[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,105 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,105 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,105 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2fb3c715] ([p=0,l=142,c=8192,r=142],[p=0,l=169,c=8192,r=169],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,105 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@6c45ed88[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,105 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,105 DEBUG || flushed 311 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,105 DEBUG || Flushed=true written=311 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,105 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,105 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2fb3c715] ([p=142,l=142,c=8192,r=0],[p=169,l=169,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,105 DEBUG || onWriteComplete(true,null) 
s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,105 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,106 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,106 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,106 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,106 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,106 DEBUG || complete(org.eclipse.jetty.util.Callback$3@2072f7ad) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,106 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,106 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,106 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,106 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,106 DEBUG || onCompleted for /connectors/vk_nau27_sink/status written=169 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,106 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_sink/status HTTP/1.1" 200 169 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,106 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,106 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,106 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE 
awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,106 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,106 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,106 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,106 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,106 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,106 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,106 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,106 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || !handle TERMINATED 
HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,106 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,106 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,106 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,106 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,106 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,106 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,104 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,106 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} GET //naument:8083/connectors/vk_nau27_src/status HTTP/1.1 Accept-Encoding: 
gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,106 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,106 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,106 DEBUG || handle //naument:8083/connectors/vk_nau27_src/status HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,106 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,106 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,106 DEBUG || REQUEST GET /connectors/vk_nau27_src/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,107 DEBUG || scope null||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,107 DEBUG || context=||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,107 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,107 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,107 DEBUG || servlet ||/connectors/vk_nau27_src/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,107 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,106 DEBUG || 
HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,106 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,107 DEBUG || write(array HeapByteBuffer@36cb6b7e[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,107 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=170 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,107 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,107 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,107 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:19,107 DEBUG || sendResponse info=null content=HeapByteBuffer@46355ea4[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,108 DEBUG || COMMIT for /connectors/vk_nau27_src/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE 
awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,108 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,108 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@350abdf3] (null,[p=0,l=170,c=8192,r=170],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,108 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,108 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=170} last=true content=HeapByteBuffer@46355ea4[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,108 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,107 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,108 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=36]@2023-08-25T13:57:19.108176+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,108 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=36]@2023-08-25T13:57:19.108229+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,108 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=36]@2023-08-25T13:57:19.108267+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,108 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,108 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=37]@2023-08-25T13:57:19.108326+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,108 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,108 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,108 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,108 DEBUG || filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,108 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,108 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,108 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING 
os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,108 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,108 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,108 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/config on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} GET //naument:8083/connectors/vk_nau27_sink/config HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,108 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,108 DEBUG || releaseRequestBuffer 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,108 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/config HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,108 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,108 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,108 DEBUG || REQUEST GET /connectors/vk_nau27_sink/config on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,108 DEBUG || scope null||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,108 DEBUG || context=||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,108 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,108 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,108 DEBUG || servlet ||/connectors/vk_nau27_sink/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,108 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,109 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,109 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,109 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:19,108 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,109 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising 
WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:19,109 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,109 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,109 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,109 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,109 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,109 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,108 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,109 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,109 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,109 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,109 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@350abdf3] ([p=0,l=142,c=8192,r=142],[p=0,l=170,c=8192,r=170],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,109 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,109 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@46355ea4[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,109 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,109 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,109 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,109 DEBUG || Key interests updated 0 -> 1 on 
SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,109 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,109 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961039109 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,109 DEBUG || flushed 312 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,109 DEBUG || Flushed=true written=312 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,109 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,109 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,109 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@350abdf3] ([p=142,l=142,c=8192,r=0],[p=170,l=170,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,109 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,109 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,109 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 692800ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,109 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,109 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,110 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,110 DEBUG || 
nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,110 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,110 DEBUG || complete(org.eclipse.jetty.util.Callback$3@14374c01) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,110 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,110 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,110 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,110 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,110 DEBUG || onCompleted for /connectors/vk_nau27_src/status written=170 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,110 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,110 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,110 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,110 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,110 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,110 DEBUG || parseNext s=CONTENT 
HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,110 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,110 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,110 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,110 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,110 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,110 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,110 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=6,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,110 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,110 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,110 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,110 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,110 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,110 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,110 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 
0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,110 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,110 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,110 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,110 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,110 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,110 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,110 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:19,110 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,110 DEBUG || interested 
FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,110 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,110 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,111 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,111 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=33]@2023-08-25T13:57:19.111057+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,111 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=33]@2023-08-25T13:57:19.111112+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,111 DEBUG || fillable FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false 
i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,111 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,111 DEBUG || filled 149 HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,111 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,111 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,111 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=149,c=8192,r=149]={<<>>\nson\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,111 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,111 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,111 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,111 DEBUG || write(array HeapByteBuffer@4cb6b878[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,111 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=808 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,111 DEBUG || sendResponse info=null content=HeapByteBuffer@73e4649a[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,111 DEBUG || COMMIT for /connectors/vk_nau27_sink/config on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true 
al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,111 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7e6ee20d] (null,[p=0,l=808,c=8192,r=808],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,111 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=808} last=true content=HeapByteBuffer@73e4649a[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,111 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,111 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,111 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7e6ee20d] ([p=0,l=142,c=8192,r=142],[p=0,l=808,c=8192,r=808],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,111 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@73e4649a[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,111 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,111 DEBUG || flushed 950 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,111 DEBUG || Flushed=true written=950 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,111 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,111 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7e6ee20d] ([p=142,l=142,c=8192,r=0],[p=808,l=808,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,111 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,111 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,111 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE 
awp=false se=false i=true al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,111 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,111 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,111 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,111 DEBUG || complete(org.eclipse.jetty.util.Callback$3@33f2411b) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,111 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,112 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,112 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,112 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,112 DEBUG || onCompleted for /connectors/vk_nau27_sink/config written=808 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,112 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_sink/config HTTP/1.1" 200 808 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,112 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,112 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,112 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=4} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,112 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=4} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,112 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,112 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,112 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,112 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,112 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,112 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,112 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,112 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,112 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,112 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,112 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,112 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,112 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,112 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} 
[org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,112 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,112 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,112 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,112 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,112 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,112 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:19,112 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,112 DEBUG || changeInterests p=false 0->1 for 
SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,112 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,112 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,112 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,112 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,112 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,112 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,112 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=34]@2023-08-25T13:57:19.112873+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,112 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=34]@2023-08-25T13:57:19.112917+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,112 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,112 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,112 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=SPACE2,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,112 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,111 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=2/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,113 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,113 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,112 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,113 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,113 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 
13:57:19,113 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,113 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,113 DEBUG || task CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,113 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:57:19.113468+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,113 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=25]@2023-08-25T13:57:19.113519+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,113 DEBUG || fillable FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,113 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,113 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,113 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,113 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=6,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,113 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,113 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,113 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,113 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,114 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,114 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,114 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,114 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,114 DEBUG || scope null||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,114 DEBUG || context=||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,114 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,114 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,114 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 
2023-08-25 13:57:19,114 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,114 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,114 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:19,114 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:57:19.114723+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,114 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:57:19.114766+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,114 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,114 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,114 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,114 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,112 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,114 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,114 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,111 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,114 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,114 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,114 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 
2023-08-25 13:57:19,114 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,114 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,114 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,114 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,114 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,114 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,115 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,115 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/config on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} GET //naument:8083/connectors/vk_nau27_src/config HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,115 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,115 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,115 DEBUG || handle //naument:8083/connectors/vk_nau27_src/config HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,115 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,115 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,115 DEBUG || REQUEST GET /connectors/vk_nau27_src/config on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,115 DEBUG || scope 
null||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,115 DEBUG || context=||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,115 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,115 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,115 DEBUG || servlet ||/connectors/vk_nau27_src/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,115 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,115 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,115 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,115 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:19,115 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,115 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,115 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,116 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961039115 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,116 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,116 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 692793ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,116 DEBUG || write(array HeapByteBuffer@e635867[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,116 DEBUG || write(array 
HeapByteBuffer@36e327e8[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,116 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1431 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,116 DEBUG || sendResponse info=null content=HeapByteBuffer@fdac29a[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,116 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=929 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,116 DEBUG || COMMIT for /connectors/vk_nau27_src/config on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,116 DEBUG || sendResponse info=null content=HeapByteBuffer@6b766c49[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,116 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@437cbf96] (null,[p=0,l=1431,c=8192,r=1431],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,116 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,116 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1431} last=true content=HeapByteBuffer@fdac29a[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,116 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,116 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7571b5d9] (null,[p=0,l=929,c=8192,r=929],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,116 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,116 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@437cbf96] ([p=0,l=143,c=8192,r=143],[p=0,l=1431,c=8192,r=1431],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,116 
DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\njso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@fdac29a[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,116 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,116 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=929} last=true content=HeapByteBuffer@6b766c49[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,116 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,116 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,116 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7571b5d9] ([p=0,l=142,c=8192,r=142],[p=0,l=929,c=8192,r=929],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,116 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\n\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@6b766c49[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,116 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,116 DEBUG || flushed 1574 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,116 DEBUG || Flushed=true written=1574 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,116 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,116 DEBUG || flushed 1071 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,116 DEBUG || Flushed=true written=1071 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,117 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,116 DEBUG || generate: DONE for 
SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@437cbf96] ([p=143,l=143,c=8192,r=0],[p=1431,l=1431,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,117 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,117 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7571b5d9] ([p=142,l=142,c=8192,r=0],[p=929,l=929,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,117 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,117 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,117 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,117 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,117 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,117 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,117 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,117 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,117 DEBUG || complete(org.eclipse.jetty.util.Callback$3@7ee244a3) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,117 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,117 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,117 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,117 DEBUG || 
unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,117 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,117 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,117 DEBUG || onCompleted for /connectors/vk_nau27_src/config written=1431 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,117 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,117 DEBUG || complete(org.eclipse.jetty.util.Callback$3@195260d9) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,117 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,117 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_src/config HTTP/1.1" 200 1431 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,117 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,117 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,117 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,117 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,117 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,117 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,117 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks written=929 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,117 DEBUG || 
HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,117 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,117 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,117 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_sink/tasks HTTP/1.1" 200 929 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,117 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,117 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,117 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,117 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,117 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,117 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,117 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,118 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=5/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,118 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=4} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,118 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,118 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,118 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=37]@2023-08-25T13:57:19.118135+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,118 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=37]@2023-08-25T13:57:19.118192+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,118 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 
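The INFO access-log records just above (GET /connectors/vk_nau27_src/config returning 1431 bytes, GET /connectors/vk_nau27_sink/tasks returning 929 bytes) show a ReactorNetty client walking the Kafka Connect REST API on the worker at naument:8083; the surrounding DEBUG noise is Jetty serving those requests. A minimal sketch of issuing the same kind of requests, assuming the worker is reachable at http://naument:8083 as in this trace (the get_json helper is illustrative, not part of Kafka Connect):

# Minimal sketch: poll the Connect REST resources that appear in this trace.
# Assumes a worker at http://naument:8083 (taken from the log); get_json is an
# illustrative helper, not a Kafka Connect API.
import json
import urllib.request

BASE_URL = "http://naument:8083"

def get_json(path: str):
    """GET a Connect REST resource and decode its JSON body."""
    req = urllib.request.Request(BASE_URL + path, headers={"Accept": "application/json"})
    with urllib.request.urlopen(req, timeout=10) as resp:
        return json.loads(resp.read().decode("utf-8"))

if __name__ == "__main__":
    for name in get_json("/connectors"):                # e.g. vk_nau27_src, vk_nau27_sink
        status = get_json(f"/connectors/{name}/status") # connector and per-task states
        tasks = get_json(f"/connectors/{name}/tasks")   # task configs, as requested in this trace
        print(name, status["connector"]["state"], len(tasks), "task(s)")

Each response body's length is what appears as the byte count in the corresponding access-log line (for example the 1551-byte task-config array returned for /connectors/vk_nau27_src/tasks later in this excerpt).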
2023-08-25 13:57:19,118 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,118 DEBUG || filled 148 HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\njson\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,118 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} filled 148 HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\njson\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,118 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\njson\r\n\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,118 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\njson\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || HEADER --> CONTENT 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,118 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,118 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,118 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,118 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,118 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,118 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,118 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,118 DEBUG || scope null||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,118 DEBUG || context=||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,118 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,118 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,118 DEBUG || servlet 
||/connectors/vk_nau27_src/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,118 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,119 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,119 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:19,117 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,118 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,119 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,119 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=38]@2023-08-25T13:57:19.119518+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,119 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,119 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=38]@2023-08-25T13:57:19.119559+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,119 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,119 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,119 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,119 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,119 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,119 DEBUG || 
HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,119 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,119 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,119 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,119 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=6} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,119 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,119 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,119 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,119 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=7,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=6} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,119 DEBUG || filled 158 HeapByteBuffer@360c19be[p=0,l=158,c=8192,r=158]={<<>>gth: 867\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,119 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,119 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} filled 158 HeapByteBuffer@360c19be[p=0,l=158,c=8192,r=158]={<<>>gth: 867\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,120 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=158,c=8192,r=158]={<<>>gth: 867\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,120 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=158,c=8192,r=158]={<<>>gth: 867\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,120 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,120 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,120 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,120 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks/0/status HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,120 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,120 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,120 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,120 DEBUG || scope null||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,120 DEBUG || context=||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,120 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,120 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,120 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,120 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,120 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:19,120 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,120 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is 
still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,120 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,120 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961039120 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,120 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,120 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 692789ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:19,121 DEBUG || write(array HeapByteBuffer@5111e0f0[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,121 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,121 DEBUG || sendResponse info=null content=HeapByteBuffer@806265a[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,121 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,121 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2e20a8c3] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,121 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@806265a[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,121 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,121 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,121 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2e20a8c3] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,121 DEBUG || write: 
WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@806265a[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,121 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,121 DEBUG || flushed 198 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,121 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,121 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,121 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2e20a8c3] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,121 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,121 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,121 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,121 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,121 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,121 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,121 DEBUG || complete(org.eclipse.jetty.util.Callback$3@420b26f8) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,122 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,122 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false 
i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,122 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,122 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,122 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,122 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_sink/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,122 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,122 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,122 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,122 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,122 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,122 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,122 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,122 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,122 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 
13:57:19,122 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,122 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,122 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,122 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,122 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,122 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,122 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,122 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,122 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,122 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,122 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,122 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,122 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,122 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,122 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:19,122 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,122 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,122 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,122 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,122 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 
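A little earlier in this excerpt the DistributedHerder logs "Scheduled next key rotation at: 1692961731909 (now: 1692961039120 scheduledTick: 1692961731909)" and then "Polling for group activity; will wait for 692789ms"; the wait is simply the distance from now to that scheduled tick. A small worked check, using the epoch-millisecond values copied from the log (plain Python, nothing Connect-specific):

# Decode the DistributedHerder epoch-millisecond values seen above and confirm
# that the 692789 ms poll wait equals the time left until the key-rotation tick.
from datetime import datetime, timezone

now_ms = 1692961039120             # "now" from the log
scheduled_tick_ms = 1692961731909  # "Scheduled next key rotation at" from the log

def to_utc(ms: int) -> str:
    return datetime.fromtimestamp(ms / 1000, tz=timezone.utc).isoformat(timespec="milliseconds")

print(to_utc(now_ms))              # 2023-08-25T10:57:19.120+00:00, i.e. 13:57:19,120 +03:00 in the log
print(to_utc(scheduled_tick_ms))   # 2023-08-25T11:08:51.909+00:00
print(scheduled_tick_ms - now_ms)  # 692789, matching "will wait for 692789ms"

So the herder has nothing scheduled until roughly 11:08:51 UTC unless a config update or an external REST request (such as the ones being served here) wakes it earlier, which is exactly what the "Received user wakeup" and "Woken up while polling for group activity" lines in this excerpt show.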
2023-08-25 13:57:19,122 DEBUG || ReservedThread@2a2521{RUNNING,thread=Thread[qtp974320615-175,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,122 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,123 DEBUG || write(array HeapByteBuffer@5787db76[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,123 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1551 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,123 DEBUG || sendResponse info=null content=HeapByteBuffer@e6bcc95[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,123 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,123 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@cd9ec89] (null,[p=0,l=1551,c=8192,r=1551],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,123 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1551} last=true content=HeapByteBuffer@e6bcc95[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,123 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,123 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,123 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@cd9ec89] ([p=0,l=143,c=8192,r=143],[p=0,l=1551,c=8192,r=1551],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,123 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\n\nso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@e6bcc95[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,123 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,123 DEBUG || flushed 1694 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,123 DEBUG || Flushed=true written=1694 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,123 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,123 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@cd9ec89] ([p=143,l=143,c=8192,r=0],[p=1551,l=1551,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,123 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,123 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,123 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,123 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,123 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,123 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,123 DEBUG || complete(org.eclipse.jetty.util.Callback$3@5ff921e) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,123 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,123 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,123 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,123 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,123 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks written=1551 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,123 
INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_src/tasks HTTP/1.1" 200 1551 "-" "ReactorNetty/1.1.6" 5 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,124 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,124 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,124 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=6} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,124 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=6} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,124 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,124 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,124 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,124 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,124 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,124 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,124 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=6} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,124 DEBUG || releaseRequestBuffer 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=6} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,124 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,124 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,124 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,124 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,124 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,124 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,124 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,124 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,124 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,124 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,124 DEBUG || releaseRequestBuffer 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,124 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:19,124 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,124 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,124 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE 
awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,124 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,124 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,125 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=34]@2023-08-25T13:57:19.124984+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,125 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=34]@2023-08-25T13:57:19.125049+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,125 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} 
task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=34]@2023-08-25T13:57:19.125079+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,125 DEBUG || fillable FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,125 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=35]@2023-08-25T13:57:19.125139+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,125 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,125 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,125 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,125 DEBUG || filled 157 HeapByteBuffer@7b3e78db[p=0,l=157,c=8192,r=157]={<<>>\n\r\nconten...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,125 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,125 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,125 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 157 HeapByteBuffer@7b3e78db[p=0,l=157,c=8192,r=157]={<<>>\n\r\nconten...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,125 DEBUG || 
HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=157,c=8192,r=157]={<<>>\n\r\nconten...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,125 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=157,c=8192,r=157]={<<>>\n\r\nconten...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,125 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,125 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,125 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,125 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks/0/status HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,125 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,125 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,125 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,125 DEBUG || scope null||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,125 DEBUG || context=||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,125 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,125 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,125 DEBUG || servlet ||/connectors/vk_nau27_src/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,125 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,126 DEBUG || write(array HeapByteBuffer@50dbe989[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,126 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,126 DEBUG || sendResponse info=null content=HeapByteBuffer@13b177b7[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,126 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING 
rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,126 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@228b81bb] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,126 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@13b177b7[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,126 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,126 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,126 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@228b81bb] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,126 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>\r\non\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@13b177b7[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,126 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,126 DEBUG || flushed 198 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,126 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,126 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,127 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@228b81bb] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,127 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,127 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,127 DEBUG || handled=true async=false committed=true on 
HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,127 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,127 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,127 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,127 DEBUG || complete(org.eclipse.jetty.util.Callback$3@1573d5d8) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,127 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,127 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,127 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,127 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,127 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,127 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,127 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,127 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,127 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,127 DEBUG || 
HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,127 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,127 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,127 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,127 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,127 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,127 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,127 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,127 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,127 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,127 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,127 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,127 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,127 DEBUG || filled 0 
HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,127 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,127 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,127 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,127 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,127 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,127 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,127 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:19,127 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE 
rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,127 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,127 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,119 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,128 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,128 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,128 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,128 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,128 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,124 DEBUG || Selector 
sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,128 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,128 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,128 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,128 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=38]@2023-08-25T13:57:19.128576+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,128 DEBUG || 
EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=38]@2023-08-25T13:57:19.128643+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,128 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,128 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,128 DEBUG || filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,128 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,128 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,128 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,128 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,128 DEBUG || SPACE1 --> URI 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,128 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,128 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,128 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,129 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,129 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,129 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,129 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,129 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,129 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,129 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,129 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,129 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,129 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/topics on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} GET //naument:8083/connectors/vk_nau27_src/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,129 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,129 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,129 DEBUG || handle //naument:8083/connectors/vk_nau27_src/topics HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,129 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,129 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE 
awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,129 DEBUG || REQUEST GET /connectors/vk_nau27_src/topics on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,129 DEBUG || scope null||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,129 DEBUG || context=||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,129 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,129 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,129 DEBUG || servlet ||/connectors/vk_nau27_src/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,129 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,130 DEBUG || write(array HeapByteBuffer@4a7e5a65[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,130 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=85 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,130 DEBUG || sendResponse info=null content=HeapByteBuffer@1aee2704[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,130 DEBUG || COMMIT for /connectors/vk_nau27_src/topics on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,130 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1319d0de] (null,[p=0,l=85,c=8192,r=85],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,130 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=85} last=true content=HeapByteBuffer@1aee2704[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,130 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 
2023-08-25 13:57:19,130 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,130 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1319d0de] ([p=0,l=141,c=8192,r=141],[p=0,l=85,c=8192,r=85],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,130 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=141,c=8192,r=141]={<<>>lication/...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@1aee2704[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,130 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,130 DEBUG || flushed 226 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,130 DEBUG || Flushed=true written=226 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,130 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,130 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@1319d0de] ([p=141,l=141,c=8192,r=0],[p=85,l=85,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,130 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,130 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,130 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,130 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,130 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,130 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,130 DEBUG || complete(org.eclipse.jetty.util.Callback$3@32c2a13f) s=CLOSED,api=BLOCKING,sc=false,e=null s=true 
e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,130 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,130 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,130 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,130 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,130 DEBUG || onCompleted for /connectors/vk_nau27_src/topics written=85 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,130 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,130 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,130 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,131 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,131 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,131 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,131 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 
13:57:19,131 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,131 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,131 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,131 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,131 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,131 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,131 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,131 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,131 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse 
HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,131 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:19,131 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,131 DEBUG || changeInterests p=true 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,131 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,131 DEBUG || updates 0 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,131 DEBUG || Key interests updated 1 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,131 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,124 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,131 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,128 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=38]@2023-08-25T13:57:19.128674+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,128 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,131 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=39]@2023-08-25T13:57:19.131796+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:19,128 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,131 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,131 DEBUG || filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,131 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,131 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=7,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,131 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,132 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/topics on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} GET //naument:8083/connectors/vk_nau27_sink/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,132 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,132 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,132 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/topics HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,132 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,132 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,132 DEBUG || REQUEST GET /connectors/vk_nau27_sink/topics on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,132 DEBUG || scope null||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,132 DEBUG || context=||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:19,132 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,132 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,132 DEBUG || servlet ||/connectors/vk_nau27_sink/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,132 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:19,133 DEBUG || write(array HeapByteBuffer@d66b3f4[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,133 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=75 null 
[org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,133 DEBUG || sendResponse info=null content=HeapByteBuffer@55aa8235[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,133 DEBUG || COMMIT for /connectors/vk_nau27_sink/topics on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,133 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2d8d3a24] (null,[p=0,l=75,c=8192,r=75],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,133 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=75} last=true content=HeapByteBuffer@55aa8235[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,133 DEBUG || Date: Fri, 25 Aug 2023 10:57:19 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,133 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:19,133 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2d8d3a24] ([p=0,l=141,c=8192,r=141],[p=0,l=75,c=8192,r=75],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,133 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@55aa8235[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,133 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,133 DEBUG || flushed 216 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,133 DEBUG || Flushed=true written=216 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,133 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:19,133 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@2d8d3a24] ([p=141,l=141,c=8192,r=0],[p=75,l=75,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,133 DEBUG || onWriteComplete(true,null) 
s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,133 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:19,133 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:19,133 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,133 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,133 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,133 DEBUG || complete(org.eclipse.jetty.util.Callback$3@27d3c3e4) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:19,133 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,133 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,133 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,133 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,133 DEBUG || onCompleted for /connectors/vk_nau27_sink/topics written=75 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,133 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:19 +0000] "GET /connectors/vk_nau27_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:19,133 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,133 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,133 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false 
i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,133 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,133 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,133 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,133 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,134 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,134 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,134 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:19,134 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,134 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=8,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,134 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:19,134 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,134 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,134 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE 
rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:19,134 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,134 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,134 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,134 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,134 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:19,134 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,134 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,134 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:19,134 DEBUG || interested 
FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:19,134 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:19,134 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,134 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,134 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:19,134 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,134 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:19,134 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,134 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,134 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,134 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,134 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,134 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 
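The exchange traced above ends with the Connect REST server answering GET /connectors/vk_nau27_sink/topics with a 75-byte JSON body of the form {"vk_nau27_sink":{"topics":[...]}}. A minimal client sketch that issues the same request is shown below; the host naument:8083, the path and the Accept: application/json header are taken from the logged request, while the class name and the use of java.net.http are illustrative assumptions, not anything the worker itself runs.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class ConnectTopicsCheck {
        public static void main(String[] args) throws Exception {
            HttpClient client = HttpClient.newHttpClient();
            // Same endpoint and Accept header as the request handled in the log above.
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://naument:8083/connectors/vk_nau27_sink/topics"))
                    .header("Accept", "application/json")
                    .GET()
                    .build();
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            // The worker answered 200 with a body like:
            // {"vk_nau27_sink":{"topics":["vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27"]}}
            System.out.println(response.statusCode() + " " + response.body());
        }
    }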
2023-08-25 13:57:19,134 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,134 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2180, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been 
processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=724) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147456096 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,160 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2183, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=724, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,160 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147456096 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=228, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=180) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,260 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 433 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=229, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=180, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,260 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,260 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 433 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2188, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=725) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370108 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2191, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=725, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
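The naument--offsets and naument--statuses consumers above are sending empty incremental FETCH requests (toSend=(), sessionEpoch 724/725) and skipping every partition whose previous request is still in flight, which is the normal KafkaConsumer polling pattern when no new data has arrived. A bare-bones consumer loop that produces this kind of traffic is sketched below; the bootstrap brokers and the naument-status topic name are taken from the log, while the group id, deserializers and loop structure are illustrative assumptions, not the Connect worker's actual internal consumer code.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class StatusTopicTail {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Brokers as they appear in the fetch log: broker1/2/3:29092.
            props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
            props.put("group.id", "naument-status-reader");        // illustrative group id
            props.put("key.deserializer", ByteArrayDeserializer.class.getName());
            props.put("value.deserializer", ByteArrayDeserializer.class.getName());
            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("naument-status"));     // topic behind partitions naument-status-0..4
                while (true) {
                    // Each poll() drives the incremental FETCH / "Skipping fetch" cycle seen above.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    for (ConsumerRecord<byte[], byte[]> record : records) {
                        System.out.printf("%s-%d@%d%n", record.topic(), record.partition(), record.offset());
                    }
                }
            }
        }
    }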
2023-08-25 13:57:19,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370108 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:19,292 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:19,292 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:19,293 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:19,293 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:19,293 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:19,293 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2189, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=735, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=727) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370017 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2192, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=727, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=729) for node 2.
Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147368994 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370017 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,359 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=736, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=729, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,360 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,360 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147368993 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,375 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request 
with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2190, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,375 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,375 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,375 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,375 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,375 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,375 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,375 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,375 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=728) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,375 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,375 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,375 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370001 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,375 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2193, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=728, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,375 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,375 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,375 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147370001 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,430 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2181, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), 
implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: 
null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node 
broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=724) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455826 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2184, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=724, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455825 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, 
clientId=naument--offsets, correlationId=2182, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, 
currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=724) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,431 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455825 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,431 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2185, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=724, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,432 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455824 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2183, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
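The FETCH requests above carry maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0, which are the client defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level (0 means READ_UNCOMMITTED). A minimal Java sketch of a plain consumer configured with the same values, reusing the broker addresses from the log but a hypothetical topic and group id:

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class FetchSettingsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Broker addresses as they appear in the log; adjust if the listeners differ.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group"); // hypothetical group id
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            // These map one-to-one onto the maxWaitMs, minBytes and maxBytes fields of the FETCH requests.
            props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");
            props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");
            // isolationLevel=0 in the request corresponds to read_uncommitted.
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("example-topic")); // hypothetical topic
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
            }
        }
    }

The repeated "Skipping fetch ... because previous request ... has not been processed" TRACE lines are normal: the consumer keeps at most one in-flight fetch per broker in a fetch session, so partitions led by a broker with an outstanding request are simply skipped on that poll.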
2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=725) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455595 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,661 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2186, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=725, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,661 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,662 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,662 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455595 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,694 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:19,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:19,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=230, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,694 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,694 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 
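The heartbeat traffic above comes from the sink task's consumer (connector-consumer-vk_nau27_sink-0) keeping its membership in group connect-vk_nau27_sink alive; its cadence is governed by heartbeat.interval.ms (the 3000 ms timer in the Heartbeat line) together with session.timeout.ms. For a Kafka Connect sink connector these can be tuned per connector through the consumer.override. prefix, assuming the worker permits client overrides (connector.client.config.override.policy=All). A sketch of the relevant keys, with illustrative values only:

    import java.util.Map;

    public class SinkConsumerOverrideSketch {
        public static void main(String[] args) {
            // Hypothetical entries to merge into the existing vk_nau27_sink connector configuration
            // (a connector config update must carry the full config, not just these keys).
            Map<String, String> overrides = Map.of(
                    "consumer.override.heartbeat.interval.ms", "3000",   // cadence of the HEARTBEAT requests seen above
                    "consumer.override.session.timeout.ms", "45000",     // coordinator drops the member after this much silence
                    "consumer.override.max.poll.interval.ms", "300000"); // max gap between poll() calls before a rebalance

            overrides.forEach((key, value) -> System.out.println(key + "=" + value));
        }
    }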
2023-08-25 13:57:19,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=230, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,694 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:19,694 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,694 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,694 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,694 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=229, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=181) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,760 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2934 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=231, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=181, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,760 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,760 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2934 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2191, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=726) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369608 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2194, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=726, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
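Most of the volume in this capture is TRACE output from org.apache.kafka.clients.consumer.internals.AbstractFetch and the KafkaConsumer poll loop. If that level was raised only for troubleshooting, it can be lowered again at runtime through the Connect worker's admin REST endpoint (PUT /admin/loggers/<logger>) instead of editing the log4j configuration and restarting. A sketch using java.net.http, assuming the worker's REST listener is reachable at http://localhost:8083:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class LoggerLevelSketch {
        public static void main(String[] args) throws Exception {
            // Assumed worker address; adjust to the actual Connect REST listener.
            String worker = "http://localhost:8083";
            String logger = "org.apache.kafka.clients.consumer.internals.AbstractFetch";

            HttpClient client = HttpClient.newHttpClient();
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create(worker + "/admin/loggers/" + logger))
                    .header("Content-Type", "application/json")
                    .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                    .build();

            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            // The endpoint responds with the list of loggers whose level was changed.
            System.out.println(response.statusCode() + " " + response.body());
        }
    }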
2023-08-25 13:57:19,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369608 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,793 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,794 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,794 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:19,794 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:19,794 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,794 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
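The repeated DEBUG output above is Debezium's ChangeEventQueue poll loop: the Connect worker's source task keeps calling poll(), and while no change events are queued (and the configured batch size has not been reached) the queue sleeps briefly and checks again, which is why each idle cycle ends with "About to send 0 records to Kafka" followed immediately by another poll. Below is a minimal sketch of that poll/sleep pattern, not Debezium's actual implementation; the maxBatchSize and pollIntervalMs values are hypothetical stand-ins for the connector's max.batch.size and poll.interval.ms settings.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

public class PollLoopSketch {
    private final Queue<String> queue = new ArrayDeque<>(); // stands in for the in-memory change-event queue
    private final int maxBatchSize = 2048;                  // hypothetical max.batch.size
    private final long pollIntervalMs = 100;                // hypothetical poll.interval.ms

    // Called repeatedly by the worker source task. It may legitimately return an empty batch,
    // which is what produces the "About to send 0 records to Kafka" TRACE line above.
    public synchronized List<String> poll() throws InterruptedException {
        List<String> batch = new ArrayList<>(maxBatchSize);
        long deadline = System.currentTimeMillis() + pollIntervalMs;
        // "checking for more records..."
        while (queue.isEmpty() && System.currentTimeMillis() < deadline) {
            // "no records available or batch size not reached yet, sleeping a bit..."
            wait(10);
        }
        // Drain whatever arrived, up to the batch size.
        while (!queue.isEmpty() && batch.size() < maxBatchSize) {
            batch.add(queue.remove());
        }
        return batch;
    }

    // Producer side (the streaming thread) would add events and wake the poller.
    public synchronized void enqueue(String event) {
        queue.add(event);
        notifyAll();
    }
}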
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:19,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2192, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=728) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,859 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,859 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369517 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2195, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=728, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369516 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,861 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=736, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,861 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,861 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,861 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,861 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=730) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,861 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,861 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,861 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147368492 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,861 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=737, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=730, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,861 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,861 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147368492 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,875 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2193, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,875 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,875 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=729) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,875 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369501 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,875 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2196, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=729, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,875 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369501 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2184, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node 
broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=725) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,931 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455325 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,931 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2187, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=725, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455325 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2185, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position 
FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 
(id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=725) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455324 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:19,932 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2188, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=725, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
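The entries above come from the Connect worker's internal offsets-topic reader (clientId=naument--offsets, groupId=naument): a fetch is queued per partition whose leader has no request in flight, partitions whose previous request is still outstanding are skipped, and one incremental fetch (sessionId/epoch) is sent per broker. The FetchRequestData fields in the last entry (maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0 i.e. READ_UNCOMMITTED) are simply the stock consumer fetch defaults. Purely as an illustrative sketch, not the worker's actual code path, and with placeholder bootstrap/client-id values, a standalone consumer configured with the same fetch settings and pointed at one partition of the naument-offset topic could look like this:

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetsTopicFetchSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder bootstrap list; the log only shows broker1/2/3:29092 as the partition leaders.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets-sketch");
        // The fetch-related settings below match the FetchRequestData fields in the entry above
        // and are also the stock consumer defaults:
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);              // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                  // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);           // maxBytes=50 MiB
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Assign one partition of the internal offsets topic directly (no group membership),
            // e.g. naument-offset-19, which the log above shows at fetch position 25.
            TopicPartition tp = new TopicPartition("naument-offset", 19);
            consumer.assign(List.of(tp));
            consumer.seekToBeginning(List.of(tp));
            consumer.poll(Duration.ofMillis(500)).forEach(rec ->
                    System.out.printf("%s-%d@%d%n", rec.topic(), rec.partition(), rec.offset()));
        }
    }
}
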
13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:19,932 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455324 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,073 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,073 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,073 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=945/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,073 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=945/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,073 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=945/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,073 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=35]@2023-08-25T13:57:20.073676+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,073 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=35]@2023-08-25T13:57:20.073792+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=945/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,073 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=35]@2023-08-25T13:57:20.073846+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,073 DEBUG || fillable FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=945/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,073 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=36]@2023-08-25T13:57:20.073944+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,073 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,074 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,074 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=946/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,074 DEBUG || filled 129 HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,074 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=946/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,074 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,074 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 129 HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,074 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,074 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=129,c=8192,r=129]={<<>>0230217)\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 
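The Jetty parser transitions above (START --> SPACE1 --> URI --> ... --> HEADER) are the worker's REST listener reading another GET /connectors poll from the ReactorNetty client; the full request line and its 32-byte JSON response, ["vk_nau27_src","vk_nau27_sink"], appear a few entries further down. For reference only, the same call can be reproduced against the REST endpoint with plain java.net.http; the host naument:8083 and the Accept header are taken from the parsed request, everything else is generic client code:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ListConnectors {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Same endpoint the ReactorNetty client is polling in the log above.
        HttpRequest request = HttpRequest.newBuilder(URI.create("http://naument:8083/connectors"))
                .header("Accept", "application/json")
                .GET()
                .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        // Expected output per the log: 200 ["vk_nau27_src","vk_nau27_sink"]
        System.out.println(response.statusCode() + " " + response.body());
    }
}
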
13:57:20,074 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,074 DEBUG || REQUEST for //naument:8083/connectors on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} GET //naument:8083/connectors HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,074 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,074 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,074 DEBUG || handle //naument:8083/connectors HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,074 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,074 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,074 DEBUG || REQUEST GET /connectors on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,074 DEBUG || scope null||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,074 DEBUG || context=||/connectors @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,074 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,074 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,074 DEBUG || servlet ||/connectors -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} 
[org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,074 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,075 DEBUG || write(array HeapByteBuffer@1c435829[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,076 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=32 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,076 DEBUG || sendResponse info=null content=HeapByteBuffer@254e990d[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,076 DEBUG || COMMIT for /connectors on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,076 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@e834bd7] (null,[p=0,l=32,c=8192,r=32],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,076 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=32} last=true content=HeapByteBuffer@254e990d[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,076 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 32 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,076 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,076 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@e834bd7] ([p=0,l=141,c=8192,r=141],[p=0,l=32,c=8192,r=32],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,076 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>json\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@254e990d[p=0,l=32,c=8192,r=32]={<<<["vk_nau27_src","vk_nau27_sink"]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,076 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,076 DEBUG || flushed 173 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,076 DEBUG || Flushed=true written=173 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,076 DEBUG || update 
WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,076 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=32},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@e834bd7] ([p=141,l=141,c=8192,r=0],[p=32,l=32,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,076 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,076 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,076 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,076 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,076 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,076 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,076 DEBUG || complete(org.eclipse.jetty.util.Callback$3@15a48019) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,076 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,076 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,076 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,076 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,076 DEBUG || onCompleted for /connectors written=32 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,076 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors HTTP/1.1" 200 32 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,076 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,076 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,076 DEBUG || 
HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=2} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,077 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,077 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,077 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,077 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,077 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,077 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,077 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,077 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,077 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,077 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,077 DEBUG || reset HttpParser{s=END,0 
of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,077 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,077 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,077 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,077 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,077 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,077 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,077 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,077 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,077 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,077 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} 
[org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,077 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,077 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,077 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,077 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || ReservedThread@2a2521{RUNNING,thread=Thread[qtp974320615-175,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,077 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,077 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false 
i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,077 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=946/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,077 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=946/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,077 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=946/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,078 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/REPRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=39]@2023-08-25T13:57:20.078026+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,078 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=39]@2023-08-25T13:57:20.078088+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=946/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,078 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} 
task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=39]@2023-08-25T13:57:20.078123+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,078 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=40]@2023-08-25T13:57:20.07819+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,078 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=946/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,078 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,078 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,078 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=946/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,078 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=943/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,078 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=943/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,078 DEBUG || task CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=944/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,078 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:57:20.07842+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,078 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:57:20.078461+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=944/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,078 DEBUG || fillable FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=944/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,078 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=944/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,078 DEBUG || filled 143 HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,078 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} filled 143 HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,078 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=8,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,078 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,078 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} GET //naument:8083/connectors/vk_nau27_sink HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,078 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,079 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE 
rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,079 DEBUG || handle //naument:8083/connectors/vk_nau27_sink HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,079 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,079 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,079 DEBUG || REQUEST GET /connectors/vk_nau27_sink on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,079 DEBUG || scope null||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,079 DEBUG || context=||/connectors/vk_nau27_sink @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,079 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,079 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,079 DEBUG || servlet ||/connectors/vk_nau27_sink -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,079 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,079 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,079 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:20,078 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,079 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:20,078 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=26]@2023-08-25T13:57:20.078487+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,078 DEBUG || filled 142 
HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,079 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=27]@2023-08-25T13:57:20.079936+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,079 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,079 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,079 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 142 HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,080 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,080 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,080 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,080 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,080 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} GET //naument:8083/connectors/vk_nau27_src HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,080 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,080 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,080 DEBUG || handle //naument:8083/connectors/vk_nau27_src HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,080 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,080 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,080 DEBUG || REQUEST GET /connectors/vk_nau27_src on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,080 DEBUG || scope null||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,080 DEBUG || context=||/connectors/vk_nau27_src @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} 
[org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,080 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,080 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,080 DEBUG || servlet ||/connectors/vk_nau27_src -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,080 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,079 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,079 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,080 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,080 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,080 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,080 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,080 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961040080 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,080 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,080 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,080 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 691829ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,081 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:20,081 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 
13:57:20,081 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,081 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,081 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,081 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961040081 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,081 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,081 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 691828ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,081 DEBUG || write(array HeapByteBuffer@79ca5562[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,081 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=905 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,081 DEBUG || sendResponse info=null content=HeapByteBuffer@15e6f481[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,081 DEBUG || COMMIT for /connectors/vk_nau27_sink on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,081 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@756336eb] (null,[p=0,l=905,c=8192,r=905],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,081 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=905} last=true content=HeapByteBuffer@15e6f481[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,081 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 905 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,081 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,081 DEBUG || generate: FLUSH for 
SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@756336eb] ([p=0,l=142,c=8192,r=142],[p=0,l=905,c=8192,r=905],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,081 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>cation/js...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@15e6f481[p=0,l=905,c=8192,r=905]={<<<{"name":"vk_nau27_sink","...ask":0}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,081 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,081 DEBUG || write(array HeapByteBuffer@ad76870[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,081 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1528 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,081 DEBUG || sendResponse info=null content=HeapByteBuffer@616ab829[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,081 DEBUG || flushed 1047 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,081 DEBUG || Flushed=true written=1047 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,081 DEBUG || COMMIT for /connectors/vk_nau27_src on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,081 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,081 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3ca80bfa] (null,[p=0,l=1528,c=8192,r=1528],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,081 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=905},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@756336eb] ([p=142,l=142,c=8192,r=0],[p=905,l=905,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,081 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1528} last=true 
content=HeapByteBuffer@616ab829[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,081 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 1528 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,081 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,081 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,081 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3ca80bfa] ([p=0,l=143,c=8192,r=143],[p=0,l=1528,c=8192,r=1528],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,082 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=143,c=8192,r=143]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@616ab829[p=0,l=1528,c=8192,r=1528]={<<<{"name":"vk_nau27_src","c...k":0}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,082 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,082 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,082 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,082 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || flushed 1671 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,082 DEBUG || complete(org.eclipse.jetty.util.Callback$3@5b1a2b02) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,082 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE 
awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || Flushed=true written=1671 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,082 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,082 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1528},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3ca80bfa] ([p=143,l=143,c=8192,r=0],[p=1528,l=1528,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,082 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || onCompleted for /connectors/vk_nau27_sink written=905 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,082 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_sink HTTP/1.1" 200 905 "-" "ReactorNetty/1.1.6" 4 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,082 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,082 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,082 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,082 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,082 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,082 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,082 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,082 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,082 DEBUG || complete(org.eclipse.jetty.util.Callback$3@1bc49e09) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,082 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,082 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,082 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || onCompleted for /connectors/vk_nau27_src written=1528 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,082 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=9,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,082 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_src HTTP/1.1" 200 1528 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,082 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,082 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,082 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,082 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,082 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,082 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=2} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,082 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,082 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,082 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED 
is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=2} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,082 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,082 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,082 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,082 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,082 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,082 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,082 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,083 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,083 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,083 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,083 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,083 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,083 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,083 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,083 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,083 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,083 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,083 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,083 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,083 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,083 DEBUG || interested 
FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,083 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,083 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,082 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,083 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,083 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,082 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,083 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,083 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=6/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,083 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,083 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,083 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,083 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,083 DEBUG || fillInterested 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,083 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,083 DEBUG || interested FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,083 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=36]@2023-08-25T13:57:20.083951+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,083 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,084 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,084 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,084 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=36]@2023-08-25T13:57:20.084002+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,084 DEBUG 
|| ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=36]@2023-08-25T13:57:20.084037+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,084 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,084 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=37]@2023-08-25T13:57:20.084086+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,084 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,084 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,084 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,084 DEBUG || fillable FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=6/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,084 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=6/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,084 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,084 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,084 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=6/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,084 DEBUG || filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,084 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,084 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,084 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>son\r\n\r\n\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,084 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/status HTTP/1.1 
Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,084 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,084 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,084 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/status HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,084 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,084 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,084 DEBUG || REQUEST GET /connectors/vk_nau27_sink/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,084 DEBUG || scope null||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,084 DEBUG || context=||/connectors/vk_nau27_sink/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,084 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,084 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,084 DEBUG || servlet ||/connectors/vk_nau27_sink/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,084 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,085 DEBUG || 
write(array HeapByteBuffer@1f122df9[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,085 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=169 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,085 DEBUG || sendResponse info=null content=HeapByteBuffer@1e28bf04[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,085 DEBUG || COMMIT for /connectors/vk_nau27_sink/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,085 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5a30eaff] (null,[p=0,l=169,c=8192,r=169],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,085 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=169} last=true content=HeapByteBuffer@1e28bf04[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,085 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 169 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,085 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,085 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5a30eaff] ([p=0,l=142,c=8192,r=142],[p=0,l=169,c=8192,r=169],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,085 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=142,c=8192,r=142]={<<>>son\r\n\r\njs...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@1e28bf04[p=0,l=169,c=8192,r=169]={<<<{"name":"vk_nau27_sink","...:8083"}],"type":"sink"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,085 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,085 DEBUG || flushed 311 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,085 DEBUG || Flushed=true written=311 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,085 DEBUG || update 
WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,085 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=169},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@5a30eaff] ([p=142,l=142,c=8192,r=0],[p=169,l=169,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,085 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,085 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,086 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,086 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,086 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,086 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,086 DEBUG || complete(org.eclipse.jetty.util.Callback$3@6345debe) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,086 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,086 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,086 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,086 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,086 DEBUG || onCompleted for /connectors/vk_nau27_sink/status written=169 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,086 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_sink/status HTTP/1.1" 200 169 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,086 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,086 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 
...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,086 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,086 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,086 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,086 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,086 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,086 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,086 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,086 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,086 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,086 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/status,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 
13:57:20,086 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,086 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,086 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,086 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,086 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,086 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,086 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,086 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,086 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,086 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,086 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,086 DEBUG || fillInterested 
HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,086 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,086 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,086 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,086 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,086 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,086 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,086 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,086 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,084 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || updateable 1 
[org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,086 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,087 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,086 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,087 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || onSelected 1->0 r=true w=false for 
SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,087 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || task CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,087 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,087 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=27]@2023-08-25T13:57:20.087422+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,087 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,087 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=27]@2023-08-25T13:57:20.087481+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,087 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=40]@2023-08-25T13:57:20.087539+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,087 DEBUG || fillable FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,087 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:57:20.087588+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,087 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:57:20.087643+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,087 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=40]@2023-08-25T13:57:20.087711+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,087 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,087 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=40]@2023-08-25T13:57:20.08776+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,087 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 
13:57:20,087 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=40]@2023-08-25T13:57:20.08762+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,087 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,087 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,087 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,087 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,088 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,087 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,088 DEBUG || filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,088 DEBUG || 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,088 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,088 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\ng...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,087 DEBUG || filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,088 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,088 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG 
|| HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=9,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,088 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/config on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} GET //naument:8083/connectors/vk_nau27_sink/config HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,088 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,088 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,088 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/config HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,088 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,088 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,088 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,088 DEBUG || REQUEST GET /connectors/vk_nau27_sink/config on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,088 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} GET //naument:8083/connectors/vk_nau27_src/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,088 DEBUG || scope null||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,088 DEBUG || context=||/connectors/vk_nau27_sink/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,088 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,088 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,088 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,088 DEBUG || servlet ||/connectors/vk_nau27_sink/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,088 DEBUG || chain=null 
[org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,088 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,088 DEBUG || handle //naument:8083/connectors/vk_nau27_src/status HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,088 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,088 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,088 DEBUG || REQUEST GET /connectors/vk_nau27_src/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,089 DEBUG || scope null||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,089 DEBUG || context=||/connectors/vk_nau27_src/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,089 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,089 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,089 DEBUG || servlet ||/connectors/vk_nau27_src/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,089 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,089 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,089 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,089 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:20,089 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup 
[org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:20,089 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,089 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,089 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,089 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961040089 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,089 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,089 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 691820ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,089 DEBUG || write(array HeapByteBuffer@1aebe15e[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,089 DEBUG || write(array HeapByteBuffer@13200094[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,089 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=170 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,090 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=808 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,090 DEBUG || sendResponse info=null content=HeapByteBuffer@2ff217b[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,090 DEBUG || sendResponse info=null content=HeapByteBuffer@42f7650c[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,090 DEBUG || COMMIT for /connectors/vk_nau27_src/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,090 DEBUG || COMMIT for 
/connectors/vk_nau27_sink/config on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,090 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7c07fec6] (null,[p=0,l=170,c=8192,r=170],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,090 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@56be5376] (null,[p=0,l=808,c=8192,r=808],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,090 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=170} last=true content=HeapByteBuffer@2ff217b[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,090 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 170 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,090 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=808} last=true content=HeapByteBuffer@42f7650c[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,090 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,090 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 808 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,090 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7c07fec6] ([p=0,l=142,c=8192,r=142],[p=0,l=170,c=8192,r=170],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,090 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,090 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@2ff217b[p=0,l=170,c=8192,r=170]={<<<{"name":"vk_nau27_src","c...083"}],"type":"source"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,090 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@56be5376] ([p=0,l=142,c=8192,r=142],[p=0,l=808,c=8192,r=808],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,090 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,090 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@42f7650c[p=0,l=808,c=8192,r=808]={<<<{"connector.class":"io.de...ttp://naument-sr:8081"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,090 DEBUG 
|| update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,090 DEBUG || flushed 312 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,090 DEBUG || flushed 950 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,090 DEBUG || Flushed=true written=312 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,090 DEBUG || Flushed=true written=950 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,090 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,090 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,090 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=170},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7c07fec6] ([p=142,l=142,c=8192,r=0],[p=170,l=170,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,090 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=808},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@56be5376] ([p=142,l=142,c=8192,r=0],[p=808,l=808,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,090 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,090 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,090 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,090 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,090 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,090 DEBUG || handled=true async=false committed=true on 
HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,090 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,090 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,090 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,090 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,090 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,090 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,090 DEBUG || complete(org.eclipse.jetty.util.Callback$3@7751398b) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,090 DEBUG || complete(org.eclipse.jetty.util.Callback$3@67abb5f0) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,090 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,090 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,090 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,090 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,090 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,090 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,090 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,090 DEBUG || onCompleted for 
/connectors/vk_nau27_src/status written=170 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,090 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,090 DEBUG || onCompleted for /connectors/vk_nau27_sink/config written=808 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,090 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_src/status HTTP/1.1" 200 170 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,090 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_sink/config HTTP/1.1" 200 808 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,090 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,090 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,090 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,090 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,090 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=2} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,091 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,091 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,091 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,091 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,091 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,091 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,091 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=10,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/status,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/config,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,091 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,091 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,091 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,091 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,091 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=4/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,091 
DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,091 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,091 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,091 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,091 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,091 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,091 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute 
EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=37]@2023-08-25T13:57:20.091652+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,091 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=37]@2023-08-25T13:57:20.091761+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,091 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,091 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=37]@2023-08-25T13:57:20.091735+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,091 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,091 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=37]@2023-08-25T13:57:20.091837+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,091 DEBUG || interested FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,091 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,091 DEBUG || fillable FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,091 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,091 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,091 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,091 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,091 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN 
is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,092 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=5/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,092 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,091 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=5/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,092 DEBUG || fillInterested 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,092 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,092 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,092 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,092 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,092 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,092 DEBUG || START --> SPACE1 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,092 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,092 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/config on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} GET //naument:8083/connectors/vk_nau27_src/config HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || 
HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,092 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,092 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,092 DEBUG || ReservedThread@2a2521{RUNNING,thread=Thread[qtp974320615-175,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,092 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,092 DEBUG || task 
CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,092 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,092 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,092 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,092 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:57:20.092949+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,092 DEBUG || handle //naument:8083/connectors/vk_nau27_src/config HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,093 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,093 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:57:20.093039+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,093 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=28]@2023-08-25T13:57:20.093063+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 
13:57:20,093 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,093 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:57:20.093132+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,093 DEBUG || fillable FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,093 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,093 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,093 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,093 DEBUG || REQUEST GET /connectors/vk_nau27_src/config on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,093 DEBUG || filled 149 HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,093 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,093 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,093 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} filled 149 
HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,093 DEBUG || scope null||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,093 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=10,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,093 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=149,c=8192,r=149]={<<>>\njson\r\n\r\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || context=||/connectors/vk_nau27_src/config @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,093 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,093 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,093 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || servlet ||/connectors/vk_nau27_src/config -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,093 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,093 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,093 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks 
on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,093 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,093 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,093 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,093 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,093 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,093 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,093 DEBUG || scope null||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,093 DEBUG || context=||/connectors/vk_nau27_sink/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,093 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,093 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,093 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks -> 
org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,093 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,093 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector config read request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,093 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting connector info request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,093 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:20,094 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_sink [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,094 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:20,094 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,094 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,094 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,094 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961040094 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,094 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,094 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 691815ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,094 DEBUG || write(array HeapByteBuffer@6db64aca[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,094 DEBUG || write(array HeapByteBuffer@2ae1f1e9[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,094 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=929 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,095 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1431 null 
[org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,095 DEBUG || sendResponse info=null content=HeapByteBuffer@36bd18e7[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,095 DEBUG || sendResponse info=null content=HeapByteBuffer@1e7eaf6c[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,095 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,095 DEBUG || COMMIT for /connectors/vk_nau27_src/config on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,095 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6b876bdd] (null,[p=0,l=929,c=8192,r=929],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,095 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@62f58082] (null,[p=0,l=1431,c=8192,r=1431],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,095 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=929} last=true content=HeapByteBuffer@36bd18e7[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,095 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1431} last=true content=HeapByteBuffer@1e7eaf6c[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,095 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 1431 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,095 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 929 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,095 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,095 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,095 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@62f58082] ([p=0,l=143,c=8192,r=143],[p=0,l=1431,c=8192,r=1431],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 
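The entries above and below show the worker's REST interface answering GET requests for connector configuration, status, and task assignments (for example /connectors/vk_nau27_src/config, /connectors/vk_nau27_src/status, and /connectors/vk_nau27_sink/tasks on naument:8083, returned as application/json). For readers reproducing these calls outside the log, the following is a minimal sketch using only the Python standard library; the host, port, and connector names are taken from the log entries and are assumed to be reachable from wherever the script runs.

# Minimal sketch: query the Kafka Connect REST endpoints seen in this log.
# Host/port (naument:8083) and connector names (vk_nau27_src, vk_nau27_sink)
# come from the log above; adjust them for your environment.
import json
import urllib.request

BASE = "http://naument:8083"

def get_json(path):
    # The worker answers these endpoints with Content-Type: application/json.
    with urllib.request.urlopen(BASE + path, timeout=10) as resp:
        return json.loads(resp.read().decode("utf-8"))

if __name__ == "__main__":
    print(get_json("/connectors/vk_nau27_src/config"))   # connector configuration
    print(get_json("/connectors/vk_nau27_src/status"))   # connector/task status
    print(get_json("/connectors/vk_nau27_sink/tasks"))   # task assignments

The response sizes logged here (e.g. Content-Length: 1431 for the config and 929 for the tasks payload) correspond to the JSON bodies such a client would receive.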
2023-08-25 13:57:20,095 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6b876bdd] ([p=0,l=142,c=8192,r=142],[p=0,l=929,c=8192,r=929],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,095 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\njso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@1e7eaf6c[p=0,l=1431,c=8192,r=1431]={<<<{"connector.class":"io.de...napshot.mode":"always"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,095 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=142,c=8192,r=142]={<<>>json\r\n\r\ns...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@36bd18e7[p=0,l=929,c=8192,r=929]={<<<[{"id":{"connector":"vk_n...p://naument-sr:8081"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,095 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,095 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,095 DEBUG || flushed 1574 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,095 DEBUG || Flushed=true written=1574 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,095 DEBUG || flushed 1071 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,095 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,095 DEBUG || Flushed=true written=1071 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,095 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1431},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@62f58082] ([p=143,l=143,c=8192,r=0],[p=1431,l=1431,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,095 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,095 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=929},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@6b876bdd] ([p=142,l=142,c=8192,r=0],[p=929,l=929,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,095 DEBUG || onWriteComplete(true,null) 
s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,095 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,095 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,095 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,095 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,095 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,095 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,095 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,095 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,095 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,095 DEBUG || complete(org.eclipse.jetty.util.Callback$3@5d7e6e03) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,095 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,095 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,095 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,095 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,095 DEBUG || complete(org.eclipse.jetty.util.Callback$3@267af83d) 
s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,095 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,095 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,095 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,095 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,095 DEBUG || onCompleted for /connectors/vk_nau27_src/config written=1431 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,095 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,095 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,095 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks written=929 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,095 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_src/config HTTP/1.1" 200 1431 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,095 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_sink/tasks HTTP/1.1" 200 929 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,095 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,095 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,095 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,095 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,095 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,095 DEBUG || 
HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=2} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=3} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || parseNext s=CONTENT HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,096 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,096 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,096 DEBUG || onRequestComplete 
HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,096 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,096 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,096 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=11,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/config,age=4} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,096 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,096 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,096 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || END --> START 
[org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,096 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,096 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,096 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,096 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,096 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,096 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,096 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=41]@2023-08-25T13:57:20.096545+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,096 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=41]@2023-08-25T13:57:20.096591+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,096 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,096 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=42]@2023-08-25T13:57:20.096733+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,096 DEBUG || 
EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=42]@2023-08-25T13:57:20.096779+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,096 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,096 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,096 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,096 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,096 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,096 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,096 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,096 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,096 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,096 DEBUG || filled 148 
HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\njson\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,097 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || interested FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,097 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} filled 148 HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\njson\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,097 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\njson\r\n\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,097 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=148,c=8192,r=148]={<<>>\n\njson\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || ReservedThread@1eabc121{RUNNING,thread=Thread[qtp974320615-125,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,097 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || changeInterests p=false 0->1 for 
SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,097 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,097 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 
1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,097 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,097 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,097 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,097 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || ReservedThread@39e9e947{RUNNING,thread=Thread[qtp974320615-128,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of 
-1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,097 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,097 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,097 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,097 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,097 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,097 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,097 DEBUG || scope null||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,098 DEBUG || context=||/connectors/vk_nau27_src/tasks @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,098 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,098 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,098 DEBUG || 
sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,098 DEBUG || servlet ||/connectors/vk_nau27_src/tasks -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,098 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,098 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,098 DEBUG || ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=38]@2023-08-25T13:57:20.098135+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,098 DEBUG || ReservedThread@39e9e947{RESERVED,thread=Thread[qtp974320615-128,5,main]} task=EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=38]@2023-08-25T13:57:20.098249+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,098 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=38]@2023-08-25T13:57:20.098211+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,098 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=38]@2023-08-25T13:57:20.098295+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,098 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,098 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,098 DEBUG || fillable FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,098 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,098 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,098 TRACE || [Worker clientId=connect-1, groupId=naument] Submitting get task configuration request vk_nau27_src [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,098 DEBUG || [Worker clientId=connect-1, groupId=naument] Received user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:20,098 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,098 DEBUG || filled 158 HeapByteBuffer@7b3e78db[p=0,l=158,c=8192,r=158]={<<>>\r\ncontent...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,098 DEBUG || [Worker clientId=connect-1, groupId=naument] Raising WakeupException in response to user wakeup [org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient] 2023-08-25 13:57:20,098 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 158 HeapByteBuffer@7b3e78db[p=0,l=158,c=8192,r=158]={<<>>\r\ncontent...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,098 TRACE || [Worker clientId=connect-1, groupId=naument] Woken up while polling for group activity [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,098 DEBUG || [Worker clientId=connect-1, groupId=naument] Ensuring group membership is still active [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,098 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=158,c=8192,r=158]={<<>>\r\ncontent...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,098 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=158,c=8192,r=158]={<<>>\r\ncontent...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,098 TRACE || [Worker clientId=connect-1, groupId=naument] Returning early because rebalance is marked as resolved (rebalanceResolved: true) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,098 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_sink/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,099 DEBUG || [Worker clientId=connect-1, groupId=naument] Scheduled next key rotation at: 1692961731909 (now: 1692961040098 scheduledTick: 1692961731909) [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,099 TRACE || [Worker clientId=connect-1, groupId=naument] Skipping config updates with incremental cooperative rebalancing since no config rebalance is required and there are no connector config, task config, or target state changes pending [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,099 TRACE || [Worker clientId=connect-1, groupId=naument] Polling for group activity; will wait for 691810ms or until poll is interrupted by either config backing store updates or a new external request [org.apache.kafka.connect.runtime.distributed.DistributedHerder] 2023-08-25 13:57:20,099 DEBUG || 
HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,099 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,099 DEBUG || handle //naument:8083/connectors/vk_nau27_sink/tasks/0/status HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,099 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,099 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,099 DEBUG || REQUEST GET /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,099 DEBUG || scope null||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,099 DEBUG || context=||/connectors/vk_nau27_sink/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,099 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,099 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,099 DEBUG || servlet ||/connectors/vk_nau27_sink/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,099 DEBUG || write(array 
HeapByteBuffer@70d00446[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,099 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,099 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=1551 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,099 DEBUG || sendResponse info=null content=HeapByteBuffer@52b0a421[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,099 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,099 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3e0804f2] (null,[p=0,l=1551,c=8192,r=1551],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,099 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=1551} last=true content=HeapByteBuffer@52b0a421[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,099 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 1551 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,099 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,099 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3e0804f2] ([p=0,l=143,c=8192,r=143],[p=0,l=1551,c=8192,r=1551],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,099 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=143,c=8192,r=143]={<<>>on\r\n\r\njso...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@52b0a421[p=0,l=1551,c=8192,r=1551]={<<<[{"id":{"connector":"vk_n...pshot.mode":"always"}}]>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,099 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,099 DEBUG || flushed 1694 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,099 DEBUG || Flushed=true written=1694 remaining=0 WriteFlusher@4cffcacd{WRITING}->null 
[org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,099 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,099 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=1551},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@3e0804f2] ([p=143,l=143,c=8192,r=0],[p=1551,l=1551,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,099 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,099 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,099 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,099 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,099 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,099 DEBUG || action COMPLETE HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,100 DEBUG || complete(org.eclipse.jetty.util.Callback$3@34c06a0a) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,100 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,100 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,100 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,100 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,100 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks written=1551 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,100 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_src/tasks HTTP/1.1" 200 1551 "-" "ReactorNetty/1.1.6" 3 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,100 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,100 DEBUG || filled 0 
HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,100 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,100 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,100 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,100 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,100 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,100 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,100 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,100 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,100 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,100 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks,age=3} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,100 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,100 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,100 DEBUG || write(array HeapByteBuffer@6c4aa5bc[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,100 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,100 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,100 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,100 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,100 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,100 DEBUG || sendResponse info=null content=HeapByteBuffer@3c95d3c4[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,100 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,100 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,100 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60016], selector=sun.nio.ch.EPollSelectorImpl@1c2db2c, interestOps=1, readyOps=1 SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,100 DEBUG || COMMIT for /connectors/vk_nau27_sink/tasks/0/status on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,100 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE 
awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,100 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,100 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7febcd7b] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,100 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,100 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,100 DEBUG || task CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,100 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@3c95d3c4[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,100 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,100 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:57:20.100837+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,100 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 57 Content-Type: application/json 
[org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,100 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,100 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,100 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=29]@2023-08-25T13:57:20.100906+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=3/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,100 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,100 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7febcd7b] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,101 DEBUG || fillable FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,101 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,101 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null 
[HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>\r\non\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@3c95d3c4[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,101 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=4/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,101 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,101 DEBUG || filled 157 HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,101 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,101 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,101 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} filled 157 HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,101 DEBUG || Wakeup on submit ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,101 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=11,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,101 DEBUG || parseNext s=START HeapByteBuffer@360c19be[p=0,l=157,c=8192,r=157]={<<>>\ngth: 867...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 
DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,101 DEBUG || ReservedThread@11a632a9{RUNNING,thread=Thread[qtp974320615-154,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,101 DEBUG || flushed 198 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=3} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,101 DEBUG || ReservedThread@1eabc121{RESERVED,thread=Thread[qtp974320615-125,5,main]} task=EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=30]@2023-08-25T13:57:20.101232+03:00 ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,101 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,101 DEBUG || EatWhatYouKill@30a791a6/SelectorProducer@719bb60d/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=30]@2023-08-25T13:57:20.101267+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,101 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,101 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 
2023-08-25 13:57:20,101 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,101 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} GET //naument:8083/connectors/vk_nau27_src/tasks/0/status HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,101 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,101 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,101 DEBUG || handle //naument:8083/connectors/vk_nau27_src/tasks/0/status HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,101 DEBUG || handling HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,101 DEBUG || action DISPATCH HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,101 DEBUG || REQUEST GET /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,101 DEBUG || scope null||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,101 DEBUG || context=||/connectors/vk_nau27_src/tasks/0/status @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,101 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST 
asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,101 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,101 DEBUG || servlet ||/connectors/vk_nau27_src/tasks/0/status -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,101 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,102 DEBUG || write(array HeapByteBuffer@741c2d31[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,102 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=57 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,102 DEBUG || sendResponse info=null content=HeapByteBuffer@dbaf9bb[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@2ce8eddd{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,102 DEBUG || COMMIT for /connectors/vk_nau27_src/tasks/0/status on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,102 DEBUG || generate: NEED_HEADER for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@293b3e99] (null,[p=0,l=57,c=8192,r=57],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,102 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=57} last=true content=HeapByteBuffer@dbaf9bb[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,102 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 57 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,102 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,102 DEBUG || generate: FLUSH for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@293b3e99] ([p=0,l=141,c=8192,r=141],[p=0,l=57,c=8192,r=57],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,102 DEBUG || write: WriteFlusher@1ad21117{IDLE}->null [HeapByteBuffer@7b3e78db[p=0,l=141,c=8192,r=141]={<<>>lication/...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@dbaf9bb[p=0,l=57,c=8192,r=57]={<<<{"id":0,"state":"RUNNING"...id":"172.18.0.10:8083"}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,102 DEBUG || update WriteFlusher@1ad21117{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,102 DEBUG || flushed 198 
SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=W,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=COMPLETING}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,102 DEBUG || Flushed=true written=198 remaining=0 WriteFlusher@1ad21117{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,102 DEBUG || update WriteFlusher@1ad21117{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,102 DEBUG || generate: DONE for SendCallback@13c45127[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@293b3e99] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,102 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,102 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,102 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,102 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,101 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,101 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,101 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,103 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,102 DEBUG || nextAction(false) COMPLETE HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,103 DEBUG || action COMPLETE HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,101 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=57},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@7febcd7b] ([p=141,l=141,c=8192,r=0],[p=57,l=57,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,103 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,103 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 DEBUG || complete(org.eclipse.jetty.util.Callback$3@13851a7b) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,103 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,103 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 DEBUG || completed HttpChannelState@e1de8c{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,103 DEBUG || unhandle HttpChannelState@e1de8c{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,103 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,103 DEBUG || nextAction(false) TERMINATED HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,103 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 DEBUG || action TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,103 DEBUG || onCompleted for /connectors/vk_nau27_src/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,103 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, 
oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,103 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_src/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,103 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=13,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=5} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,103 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60032], selector=sun.nio.ch.EPollSelectorImpl@70fb35a4, interestOps=1, readyOps=1 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,103 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,103 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,103 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,103 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,103 DEBUG || task CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,103 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,103 DEBUG || ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} tryExecute EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0}][pc=0,pic=0,pec=0,epc=42]@2023-08-25T13:57:20.10372+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,103 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,103 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,103 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,103 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=5} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,103 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=42]@2023-08-25T13:57:20.103768+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,103 DEBUG || complete(org.eclipse.jetty.util.Callback$3@67ddffa) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,103 DEBUG || fillable FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,103 DEBUG || onContentComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,103 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,103 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,103 DEBUG || onRequestComplete HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,103 DEBUG || filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,103 DEBUG || HttpInputOverHTTP@4043abec[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,103 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,103 DEBUG || ReservedThread@11a632a9{RESERVED,thread=Thread[qtp974320615-154,5,main]} task=EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=42]@2023-08-25T13:57:20.103789+03:00 ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,103 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,103 DEBUG || EatWhatYouKill@18f8e2e0/SelectorProducer@1203d787/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=1/8,pending=0}][pc=0,pic=0,pec=0,epc=43]@2023-08-25T13:57:20.103994+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,104 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,104 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,104 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,103 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=2} parsed false HttpParser{s=END,0 of -1} 
[org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,103 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 150 HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || Key interests updated 1 -> 0 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,104 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=END,0 of -1},g=HttpGenerator@12b96a37{s=END}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=12,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/tasks/0/status,age=3} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,104 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\n\r...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=150,c=8192,r=150]={<<>>json\r\n\r\n\r...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,104 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || recycle HttpChannelState@e1de8c{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,104 DEBUG || 
HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || onCompleted for /connectors/vk_nau27_sink/tasks/0/status written=57 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,104 DEBUG || !handle TERMINATED HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,104 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,104 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,104 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_sink/tasks/0/status HTTP/1.1" 200 57 "-" "ReactorNetty/1.1.6" 6 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,104 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,104 DEBUG || HEADER:Accept: application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 ...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>HTTP/1.1 
...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_sink/topics on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} GET //naument:8083/connectors/vk_nau27_sink/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,104 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,104 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=6} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || releaseRequestBuffer HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || handle 
//naument:8083/connectors/vk_nau27_sink/topics HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,104 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=6} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || fillInterested HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=-,flush=-,to=1/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,104 DEBUG || handling HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,104 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || interested FillInterest@1c22c352{AC.ReadCB@268327b3{HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,104 DEBUG || action DISPATCH HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,104 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,104 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,104 DEBUG || REQUEST GET 
/connectors/vk_nau27_sink/topics on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,104 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da on ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,104 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,104 DEBUG || scope null||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,104 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=6} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,104 DEBUG || Wakeup on submit ManagedSelector@5981f2c6{STARTED} id=2 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,104 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,104 DEBUG || context=||/connectors/vk_nau27_sink/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,104 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,104 DEBUG || HttpConnection@268327b3::SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,105 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=6} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,105 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,105 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,105 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=3/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of 
-1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/tasks/0/status,age=7} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,104 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,105 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@2323d1da [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,105 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,105 DEBUG || ReservedThread@2a2521{RUNNING,thread=Thread[qtp974320615-175,5,main]} was=RUNNING next=RESERVED size=0+2 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,105 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,105 DEBUG || ReservedThread@2a2521{RESERVED,thread=Thread[qtp974320615-175,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=2/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,105 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,105 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,105 DEBUG || servlet ||/connectors/vk_nau27_sink/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,105 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,105 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,105 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@482e05b3{l=/10.0.2.144:8083,r=/10.0.2.141:60016,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@268327b3[p=HttpParser{s=START,0 of -1},g=HttpGenerator@12b96a37{s=START}]=>HttpChannelOverHttp@1b42f9f4{s=HttpChannelState@e1de8c{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=12,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,105 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,105 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@1c2db2c waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,105 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,105 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,105 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,105 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,105 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,105 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,105 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,105 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=4/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,105 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,105 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true 
al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,105 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db on ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,105 DEBUG || Wakeup on submit ManagedSelector@6942ee48{STARTED} id=3 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,105 DEBUG || write(array HeapByteBuffer@32f5d72e[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,105 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,105 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,105 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=75 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,105 DEBUG || ReservedThread@7f5ad31e{RUNNING,thread=Thread[qtp974320615-123,5,main]} was=RUNNING next=RESERVED size=0+3 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,105 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,105 DEBUG || ReservedThread@7f5ad31e{RESERVED,thread=Thread[qtp974320615-123,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,105 DEBUG || sendResponse info=null content=HeapByteBuffer@49faccf2[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@268993b0{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,106 DEBUG || COMMIT for /connectors/vk_nau27_sink/topics on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,106 DEBUG || generate: NEED_HEADER for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@a48fac5] (null,[p=0,l=75,c=8192,r=75],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,106 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=75} last=true 
content=HeapByteBuffer@49faccf2[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,106 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 75 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,106 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,106 DEBUG || generate: FLUSH for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@a48fac5] ([p=0,l=141,c=8192,r=141],[p=0,l=75,c=8192,r=75],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,106 DEBUG || write: WriteFlusher@4cffcacd{IDLE}->null [HeapByteBuffer@1d18b05f[p=0,l=141,c=8192,r=141]={<<>>\r\non\r\n\r\nj...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@49faccf2[p=0,l=75,c=8192,r=75]={<<<{"vk_nau27_sink":{"topics...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,106 DEBUG || update WriteFlusher@4cffcacd{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,106 DEBUG || flushed 216 SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=W,to=2/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=COMPLETING}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,106 DEBUG || Flushed=true written=216 remaining=0 WriteFlusher@4cffcacd{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,106 DEBUG || update WriteFlusher@4cffcacd{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,106 DEBUG || generate: DONE for SendCallback@73deedb1[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=75},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@a48fac5] ([p=141,l=141,c=8192,r=0],[p=75,l=75,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,106 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,106 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,106 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=13,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,106 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,106 DEBUG || nextAction(false) COMPLETE HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,106 DEBUG || action COMPLETE 
HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,106 DEBUG || complete(org.eclipse.jetty.util.Callback$3@74ab0766) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,106 DEBUG || completed HttpChannelState@39f730d3{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,106 DEBUG || unhandle HttpChannelState@39f730d3{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,106 DEBUG || nextAction(false) TERMINATED HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,106 DEBUG || action TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,106 DEBUG || onCompleted for /connectors/vk_nau27_sink/topics written=75 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,106 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_sink/topics HTTP/1.1" 200 75 "-" "ReactorNetty/1.1.6" 2 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,106 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,106 DEBUG || filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,106 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} filled 0 HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,106 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} parse HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,106 DEBUG || parseNext s=CONTENT HeapByteBuffer@360c19be[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,106 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 
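The access-log record just above shows the worker answering GET /connectors/vk_nau27_sink/topics with a 200 and a 75-byte JSON body listing the connector's active topics. A minimal sketch of reproducing that call from outside the worker, assuming the REST listener at naument:8083 (taken from the Host header in this trace) is reachable over plain HTTP from where the script runs:

# Minimal sketch: query the Connect REST endpoint seen in the trace above.
# Assumptions: http://naument:8083 is the worker's REST listener and is
# reachable from this host; the connector name is taken from the request URI.
import json
import urllib.request

BASE = "http://naument:8083"      # REST listener from the log (assumed plain HTTP)
CONNECTOR = "vk_nau27_sink"       # connector name seen in the request URI

with urllib.request.urlopen(f"{BASE}/connectors/{CONNECTOR}/topics", timeout=10) as resp:
    body = json.load(resp)        # shape matches the response body logged above,
                                  # e.g. {"vk_nau27_sink": {"topics": [...]}}

print(body[CONNECTOR]["topics"])  # active topics recorded for the connector

The companion source connector is queried the same way later in the trace (GET /connectors/vk_nau27_src/topics, 200, 85 bytes).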
2023-08-25 13:57:20,106 DEBUG || onContentComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,106 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,106 DEBUG || onRequestComplete HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,106 DEBUG || HttpInputOverHTTP@223616f5[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,106 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,106 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=END,0 of -1},g=HttpGenerator@358d76b0{s=END}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=13,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_sink/topics,age=2} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,106 DEBUG || recycle HttpChannelState@39f730d3{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,106 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,106 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,106 DEBUG || !handle TERMINATED HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,107 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,107 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,107 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,107 DEBUG || 
HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,107 DEBUG || parseNext s=START HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,107 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,107 DEBUG || releaseRequestBuffer HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,107 DEBUG || fillInterested HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=-,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,107 DEBUG || interested FillInterest@357cb97a{AC.ReadCB@1b4d4160{HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/0,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,107 DEBUG || changeInterests p=false 0->1 for SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,107 DEBUG || Queued change lazy=false org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 on ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || Wakeup on submit 
ManagedSelector@2dd08ff1{STARTED} id=0 keys=1 selected=0 updates=1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken with none selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 woken up from select, 0/0/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || HttpConnection@1b4d4160::SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=0/1,kio=0,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of -1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,107 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,107 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,106 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@7e1287db [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 processing 0 keys, 1 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || updateable 1 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || update org.eclipse.jetty.io.ChannelEndPoint$$Lambda$754/0x00000008408dc040@44cbb8f4 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,107 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 woken up from select, 1/1/1 selected [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 processing 1 keys, 0 updates [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || Key interests updated 0 -> 1 on SocketChannelEndPoint@e4815ba{l=/10.0.2.144:8083,r=/10.0.2.141:60032,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@1b4d4160[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@358d76b0{s=START}]=>HttpChannelOverHttp@63606d6f{s=HttpChannelState@39f730d3{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,107 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@70fb35a4 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || selected 1 channel=java.nio.channels.SocketChannel[connected local=/10.0.2.144:8083 remote=/10.0.2.141:60020], selector=sun.nio.ch.EPollSelectorImpl@2b071d26, interestOps=1, readyOps=1 SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,107 DEBUG || onSelected 1->0 r=true w=false for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,107 DEBUG || task CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,107 DEBUG || ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} tryExecute EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=false/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0}][pc=0,pic=0,pec=0,epc=39]@2023-08-25T13:57:20.10764+03:00 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,107 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/IDLE/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=39]@2023-08-25T13:57:20.107716+03:00 m=EXECUTE_PRODUCE_CONSUME t=CEP:SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}:runFillable:BLOCKING/BLOCKING [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,107 DEBUG || fillable FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of 
-1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,107 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=2/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} onFillable enter HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,107 DEBUG || filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,107 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} filled 149 HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,107 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=13,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,107 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=149,c=8192,r=149]={<<>>json\r\n\r\n\n...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,107 DEBUG || START --> SPACE1 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,107 DEBUG || SPACE1 --> URI [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,107 DEBUG || URI --> SPACE2 [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || SPACE2 --> REQUEST_VERSION [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || REQUEST_VERSION --> HEADER [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || HEADER:Accept-Encoding: gzip --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || HEADER:Accept-Encoding: gzip --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || HEADER:User-Agent: ReactorNetty/1.1.6 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || HEADER:Host: naument:8083 --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || HEADER:Host: naument:8083 --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || HEADER:Accept: 
application/json --> IN_VALUE [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || HEADER:Accept: application/json --> FIELD [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || HEADER --> CONTENT [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,108 DEBUG || REQUEST for //naument:8083/connectors/vk_nau27_src/topics on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} GET //naument:8083/connectors/vk_nau27_src/topics HTTP/1.1 Accept-Encoding: gzip User-Agent: ReactorNetty/1.1.6 Host: naument:8083 Accept: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,108 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} parsed true HttpParser{s=CONTENT,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,108 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,108 DEBUG || handle //naument:8083/connectors/vk_nau27_src/topics HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,108 DEBUG || handling HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,108 DEBUG || action DISPATCH HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,108 DEBUG || REQUEST GET /connectors/vk_nau27_src/topics on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=0} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,108 DEBUG || scope null||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,108 DEBUG || context=||/connectors/vk_nau27_src/topics @ o.e.j.s.ServletContextHandler@5cb6abc8{/,null,AVAILABLE} [org.eclipse.jetty.server.handler.ContextHandler] 2023-08-25 13:57:20,108 DEBUG || Entering scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1, dispatch=REQUEST asyncstarted=false 
[org.eclipse.jetty.server.session] 2023-08-25 13:57:20,108 DEBUG || sessionHandler=org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 session=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,108 DEBUG || servlet ||/connectors/vk_nau27_src/topics -> org.glassfish.jersey.servlet.ServletContainer-1e406cbe==org.glassfish.jersey.servlet.ServletContainer@f563f84e{jsp=null,order=-1,inst=true,async=true,src=EMBEDDED:null,STARTED} [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,108 DEBUG || chain=null [org.eclipse.jetty.servlet.ServletHandler] 2023-08-25 13:57:20,109 DEBUG || write(array HeapByteBuffer@79c50622[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}) [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,109 DEBUG || write(array) s=CLOSING,api=BLOCKED,sc=false,e=null last=true agg=false flush=true async=false, len=85 null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,109 DEBUG || sendResponse info=null content=HeapByteBuffer@276153dd[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} complete=true committing=true callback=Blocker@57a904d9{null} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,109 DEBUG || COMMIT for /connectors/vk_nau27_src/topics on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} 200 OK HTTP/1.1 Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,109 DEBUG || generate: NEED_HEADER for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@77e81d6f] (null,[p=0,l=85,c=8192,r=85],true)@START [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,109 DEBUG || generateHeaders HTTP/1.1{s=200,h=3,cl=85} last=true content=HeapByteBuffer@276153dd[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,109 DEBUG || Date: Fri, 25 Aug 2023 10:57:20 GMT Content-Length: 85 Content-Type: application/json [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,109 DEBUG || CONTENT_LENGTH [org.eclipse.jetty.http.HttpGenerator] 2023-08-25 13:57:20,109 DEBUG || generate: FLUSH for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@77e81d6f] ([p=0,l=141,c=8192,r=141],[p=0,l=85,c=8192,r=85],true)@COMPLETING [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,109 DEBUG || write: WriteFlusher@62b6766d{IDLE}->null [HeapByteBuffer@360c19be[p=0,l=141,c=8192,r=141]={<<>>ication/j...\x00\x00\x00\x00\x00\x00\x00},HeapByteBuffer@276153dd[p=0,l=85,c=8192,r=85]={<<<{"vk_nau27_src":{"topics"...BL_SERVICECALL_NC27"]}}>>>\x00\x00\x00\x00\x00\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00}] [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,109 DEBUG || update WriteFlusher@62b6766d{WRITING}->null:IDLE-->WRITING [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,109 DEBUG || flushed 226 
SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=W,to=1/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=COMPLETING}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMMITTED is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,109 DEBUG || Flushed=true written=226 remaining=0 WriteFlusher@62b6766d{WRITING}->null [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,109 DEBUG || update WriteFlusher@62b6766d{IDLE}->null:WRITING-->IDLE [org.eclipse.jetty.io.WriteFlusher] 2023-08-25 13:57:20,109 DEBUG || generate: DONE for SendCallback@2fc92c3f[PROCESSING][i=HTTP/1.1{s=200,h=3,cl=85},cb=org.eclipse.jetty.server.HttpChannel$SendCallback@77e81d6f] ([p=141,l=141,c=8192,r=0],[p=85,l=85,c=8192,r=0],true)@END [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,109 DEBUG || onWriteComplete(true,null) s=CLOSING,api=BLOCKED,sc=false,e=null->s=CLOSED,api=BLOCKING,sc=false,e=null c=null cb=null w=false [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,109 DEBUG || Leaving scope org.eclipse.jetty.server.session.SessionHandler447878707==dftMaxIdleSec=-1 dispatch=REQUEST, async=false, session=null, oldsession=null, oldsessionhandler=null [org.eclipse.jetty.server.session] 2023-08-25 13:57:20,109 DEBUG || handled=true async=false committed=true on HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0},r=14,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.Server] 2023-08-25 13:57:20,109 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=BLOCKING os=COMPLETED is=IDLE awp=false se=false i=true al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,109 DEBUG || nextAction(false) COMPLETE HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,109 DEBUG || action COMPLETE HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=false/true,a=HANDLING,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,109 DEBUG || complete(org.eclipse.jetty.util.Callback$3@6a2a8bf) s=CLOSED,api=BLOCKING,sc=false,e=null s=true e=null, c=null [org.eclipse.jetty.server.HttpOutput] 2023-08-25 13:57:20,109 DEBUG || completed HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETING os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,109 DEBUG || unhandle HttpChannelState@2b2988f4{s=HANDLING rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,109 DEBUG || nextAction(false) TERMINATED HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,109 DEBUG || action TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} 
[org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,109 DEBUG || onCompleted for /connectors/vk_nau27_src/topics written=85 [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,109 INFO || 10.0.2.141 - - [25/Aug/2023:10:57:20 +0000] "GET /connectors/vk_nau27_src/topics HTTP/1.1" 200 85 "-" "ReactorNetty/1.1.6" 1 [org.apache.kafka.connect.runtime.rest.RestServer] 2023-08-25 13:57:20,109 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,109 DEBUG || filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,109 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} filled 0 HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,109 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=CONTENT,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} parse HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,109 DEBUG || parseNext s=CONTENT HeapByteBuffer@7b3e78db[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,109 DEBUG || CONTENT --> CONTENT_END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,109 DEBUG || onContentComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,109 DEBUG || CONTENT_END --> END [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,109 DEBUG || onRequestComplete HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,109 DEBUG || HttpInputOverHTTP@1b685a71[c=0,q=0,[0]=null,s=STREAM] addContent EOF [org.eclipse.jetty.server.HttpInput] 2023-08-25 13:57:20,109 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false 
al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} parsed false HttpParser{s=END,0 of -1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,109 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=END,0 of -1},g=HttpGenerator@46770fdd{s=END}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0},r=14,c=true/true,a=IDLE,uri=//naument:8083/connectors/vk_nau27_src/topics,age=1} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,109 DEBUG || recycle HttpChannelState@2b2988f4{s=IDLE rs=COMPLETED os=COMPLETED is=IDLE awp=false se=false i=false al=0} [org.eclipse.jetty.server.HttpChannelState] 2023-08-25 13:57:20,110 DEBUG || reset HttpParser{s=END,0 of -1} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,110 DEBUG || END --> START [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,110 DEBUG || !handle TERMINATED HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpChannel] 2023-08-25 13:57:20,110 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,110 DEBUG || filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,110 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} filled 0 HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,110 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} parse HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} {} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,110 DEBUG || parseNext s=START HeapByteBuffer@1d18b05f[p=0,l=0,c=8192,r=0]={<<<>>>GET /conn...\x00\x00\x00\x00\x00\x00\x00} [org.eclipse.jetty.http.HttpParser] 2023-08-25 13:57:20,110 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} parsed false HttpParser{s=START,0 of -1} [org.eclipse.jetty.server.HttpConnection] 
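A few records earlier the same client also hits GET /connectors/vk_nau27_sink/tasks/0/status. A small sketch for repeating that status check, under the same assumption that http://naument:8083 answers plain HTTP; the field names follow the standard Connect task-status payload rather than anything specific to this deployment:

# Minimal sketch: repeat the task-status call seen earlier in this trace
# (GET /connectors/vk_nau27_sink/tasks/0/status). Assumption: the worker's
# REST listener at http://naument:8083 is reachable over plain HTTP.
import json
import urllib.request

URL = "http://naument:8083/connectors/vk_nau27_sink/tasks/0/status"

with urllib.request.urlopen(URL, timeout=10) as resp:
    task = json.load(resp)

# Standard payload fields: "id", "state" (RUNNING/FAILED/PAUSED/UNASSIGNED)
# and "worker_id"; a failed task additionally carries a "trace" field.
print(task["state"], task.get("worker_id"))

The paired source connector (vk_nau27_src) can be polled the same way by swapping the connector name in the URL.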
2023-08-25 13:57:20,110 DEBUG || releaseRequestBuffer HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,110 DEBUG || fillInterested HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=-,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.AbstractConnection] 2023-08-25 13:57:20,110 DEBUG || interested FillInterest@217e41cf{AC.ReadCB@16b9214a{HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/0,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0}}} [org.eclipse.jetty.io.FillInterest] 2023-08-25 13:57:20,110 DEBUG || changeInterests p=true 0->1 for SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,110 DEBUG || HttpConnection@16b9214a::SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} onFillable exit HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0} null [org.eclipse.jetty.server.HttpConnection] 2023-08-25 13:57:20,110 DEBUG || updateable 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,110 DEBUG || updates 0 [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,110 DEBUG || Key interests updated 1 -> 1 on SocketChannelEndPoint@78aac236{l=/10.0.2.144:8083,r=/10.0.2.141:60020,OPEN,fill=FI,flush=-,to=0/30000}{io=1/1,kio=1,kro=1}->HttpConnection@16b9214a[p=HttpParser{s=START,0 of -1},g=HttpGenerator@46770fdd{s=START}]=>HttpChannelOverHttp@219bf7c6{s=HttpChannelState@2b2988f4{s=IDLE rs=BLOCKING os=OPEN is=IDLE awp=false se=false i=true al=0},r=14,c=false/false,a=IDLE,uri=null,age=0} [org.eclipse.jetty.io.ChannelEndPoint] 2023-08-25 13:57:20,110 DEBUG || Selector sun.nio.ch.EPollSelectorImpl@2b071d26 waiting with 1 keys [org.eclipse.jetty.io.ManagedSelector] 2023-08-25 13:57:20,110 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} 
task=EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=40]@2023-08-25T13:57:20.110464+03:00 ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,110 DEBUG || EatWhatYouKill@49639118/SelectorProducer@45451333/PRODUCING/p=true/QueuedThreadPool[qtp974320615]@3a12f3e7{STARTED,8<=11<=200,i=2,r=-1,q=0}[ReservedThreadExecutor@71687d8f{reserved=3/8,pending=0}][pc=0,pic=0,pec=0,epc=40]@2023-08-25T13:57:20.110499+03:00 tryProduce true [org.eclipse.jetty.util.thread.strategy.EatWhatYouKill] 2023-08-25 13:57:20,110 DEBUG || ReservedThread@59895b24{RUNNING,thread=Thread[qtp974320615-121,5,main]} was=RUNNING next=RESERVED size=0+4 capacity=8 [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,110 DEBUG || ReservedThread@59895b24{RESERVED,thread=Thread[qtp974320615-121,5,main]} waiting ReservedThreadExecutor@71687d8f{reserved=4/8,pending=0} [org.eclipse.jetty.util.thread.ReservedThreadExecutor] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2186, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 
at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=726) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455094 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,162 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2189, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=726, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,162 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147455094 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=231, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=182) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,260 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2434 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,260 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=232, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=182, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,260 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,260 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2434 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2194, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=727) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369108 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2197, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=727, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
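The FETCH requests logged above all carry maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), issued by the worker's internal consumers (clientId naument--offsets / naument--statuses / naument--configs, groupId naument) against broker1/broker2/broker3:29092. As a hedged sketch only, the snippet below shows how a plain KafkaConsumer would be configured to emit the same fetch parameters; the numeric values, group, client id, brokers and the naument-offset topic name are read off the log, while the deserializers and the subscribe/poll usage are illustrative assumptions (the real consumers are built internally by the Connect worker, and joining its group on a live cluster is not advisable).

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Sketch only: reproduces the fetch settings visible in the FetchRequestData
// entries above. Not how Kafka Connect constructs its internal consumers.
public class FetchSettingsExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);              // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                  // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);           // maxBytes=52428800
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // isolationLevel=0

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-offset"));                    // internal offsets topic seen in the log
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
            for (ConsumerRecord<byte[], byte[]> record : records) {
                System.out.printf("%s-%d@%d%n", record.topic(), record.partition(), record.offset());
            }
        }
    }
}
```

With these settings the broker holds each fetch for up to 500 ms waiting for at least one byte, which matches the steady once-per-poll-interval FETCH/response pairs and the "Skipping fetch ... has not been processed" traces above: only one in-flight fetch per broker is allowed, so partitions led by a node with a pending request are skipped until its response arrives.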
2023-08-25 13:57:20,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369108 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,294 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,295 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,295 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:20,295 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:20,295 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,295 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2195, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,359 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,359 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=729) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369017 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2198, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=729, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369016 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,361 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=737, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,361 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,361 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,361 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,362 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=731) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,362 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,362 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,362 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147367992 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,362 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=738, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=731, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,362 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,362 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147367991 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,375 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2196, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,376 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,376 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
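The FETCH requests in this trace are all built with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolation level 0 (READ_UNCOMMITTED) against naument-config-0. For reference, here is a hedged sketch of a standalone consumer that reads the same partition with the equivalent client settings; it assumes the kafka-clients library on the classpath, the client id is hypothetical, and it deliberately uses assign() so it does not join the worker's consumer group.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ConfigTopicReadSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
        props.put("client.id", "config-topic-inspector");   // hypothetical; the worker itself uses naument--configs
        props.put("isolation.level", "read_uncommitted");   // READ_UNCOMMITTED, as in the fetches above
        props.put("fetch.max.wait.ms", "500");               // maxWaitMs=500
        props.put("fetch.min.bytes", "1");                   // minBytes=1
        props.put("fetch.max.bytes", "52428800");            // maxBytes=52428800
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());

        TopicPartition configPartition = new TopicPartition("naument-config", 0); // naument-config-0 from the log
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.assign(List.of(configPartition));           // read-only inspection, no group join
            consumer.seekToBeginning(List.of(configPartition));
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<byte[], byte[]> r : records) {
                String key = r.key() == null ? "null" : new String(r.key());
                System.out.printf("offset=%d key=%s%n", r.offset(), key);
            }
        }
    }
}

The empty responses=() seen above simply mean the 500 ms maxWaitMs expired before minBytes of new data arrived on the partition.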
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=730) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,376 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,376 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2199, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=730, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,376 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147369000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2188, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=726) for node 1. 
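The long runs of "Skipping fetch for partition ... because previous request to broker ... has not been processed" reflect a rule visible throughout this trace: the fetcher keeps at most one in-flight FETCH per broker, so partitions led by a node with a pending request are skipped until that node's response comes back ("Removing pending request for node ..."). The following is a simplified, hypothetical Java model of that gating, not Kafka's internal AbstractFetch code; all names and types are illustrative.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Simplified model of the per-node gating the AbstractFetch log lines describe:
 * a partition is only added to a new fetch if its leader node has no request in flight.
 */
public class FetchGatingSketch {
    private final Set<Integer> nodesWithPendingRequest = new HashSet<>();
    private final Map<String, Integer> partitionLeaders = new HashMap<>(); // partition -> leader node id

    /** Group fetchable partitions by leader, skipping nodes that already have a pending request. */
    public Map<Integer, List<String>> prepareFetches(List<String> partitions) {
        Map<Integer, List<String>> requests = new HashMap<>();
        for (String partition : partitions) {
            Integer leader = partitionLeaders.get(partition);
            if (leader == null || nodesWithPendingRequest.contains(leader)) {
                // "Skipping fetch for partition X because previous request to node N has not been processed"
                continue;
            }
            requests.computeIfAbsent(leader, n -> new ArrayList<>()).add(partition);
        }
        requests.keySet().forEach(nodesWithPendingRequest::add); // "Adding pending request for node ..."
        return requests;
    }

    /** Called when a FETCH response arrives: "Removing pending request for node ...". */
    public void onResponse(int nodeId) {
        nodesWithPendingRequest.remove(nodeId);
    }
}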
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147454823 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2190, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=726, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2187, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 
rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=726) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,433 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147454823 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,433 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2191, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=726, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch 
for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,434 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147454822 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2189, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 DEBUG || 
[Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=727) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147454593 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2192, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=727, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
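The naument--offsets consumer above is fetching the 25 partitions of what appears to be the worker's offset storage topic (naument-offset-0 through naument-offset-24) spread over brokers 1-3, and the FetchPosition entries show how far it has read each one (for example naument-offset-18 at offset 122). A hedged AdminClient sketch for comparing those positions against the current log-end offsets is shown below; the bootstrap servers, topic name and partition count are taken from the log, everything else is an assumption.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class OffsetsTopicEndOffsets {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");

        try (Admin admin = Admin.create(props)) {
            // Request the latest (log-end) offset of each naument-offset partition.
            Map<TopicPartition, OffsetSpec> request = new HashMap<>();
            for (int p = 0; p < 25; p++) {
                request.put(new TopicPartition("naument-offset", p), OffsetSpec.latest());
            }
            ListOffsetsResult result = admin.listOffsets(request);
            for (TopicPartition tp : request.keySet()) {
                long endOffset = result.partitionResult(tp).get().offset();
                System.out.printf("%s end offset = %d%n", tp, endOffset);
            }
        }
    }
}

If the end offsets match the FetchPosition offsets logged here, the consumer is fully caught up, which is consistent with every incremental fetch response in this trace coming back empty.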
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147454593 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=232, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,760 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,761 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,761 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=183) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,761 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,761 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,761 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1934 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,761 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=233, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=183, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,761 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,761 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1933 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2197, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,767 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at 
position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,767 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=728) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147368609 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2200, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=728, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147368608 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue]
[... the preceding pair of ChangeEventQueue DEBUG entries ("checking for more records..." / "no records available or batch size not reached yet, sleeping a bit...") repeats verbatim several dozen more times, all timestamped 2023-08-25 13:57:20,795 ...]
2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,796 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,796 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:20,796 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:20,796 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,796 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
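The 13:57:20,795 burst above is the Debezium ChangeEventQueue idle loop: the vk_nau27 Oracle connector task polls its in-memory queue, finds neither records nor a full batch, and sleeps briefly, while WorkerSourceTask{id=vk_nau27_src-0} correspondingly reports 0 records to send. To confirm a task is merely idle rather than stalled, and to see how much of the DEBUG volume this loop accounts for, a minimal counting sketch can help; it assumes the raw worker log has one entry per line in the "<timestamp> <level> <context> <message> [<logger>]" layout used here.

import re
import sys
from collections import Counter

# Assumed layout of one worker-log entry per line, e.g.
#   2023-08-25 13:57:20,795 DEBUG Oracle|vk_nau27|oracle-connector-task no records available ... [io.debezium.connector.base.ChangeEventQueue]
ENTRY = re.compile(
    r"^(?P<second>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}),\d{3} +"
    r"(?P<level>[A-Z]+) +(?P<rest>.*) \[(?P<logger>[^\]]+)\]\s*$"
)

def idle_polls_per_second(path: str) -> Counter:
    """Count ChangeEventQueue 'no records available' entries per log second."""
    counts = Counter()
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            m = ENTRY.match(line)
            if m and m["logger"].endswith("ChangeEventQueue") \
                    and "no records available" in m["rest"]:
                counts[m["second"]] += 1
    return counts

if __name__ == "__main__":
    for second, n in sorted(idle_polls_per_second(sys.argv[1]).items()):
        print(second, n)

If the loop is just noise, raising the io.debezium.connector.base.ChangeEventQueue logger level from TRACE/DEBUG to INFO silences it without changing connector behavior.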
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:20,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2198, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=730) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147368516 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,860 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2201, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=730, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,860 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147368516 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,862 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=738, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,862 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,862 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,862 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,862 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=732) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,862 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,862 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,862 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147367491 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,862 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=739, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=732, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,862 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,862 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147367491 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,876 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2199, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,876 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,876 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
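Each consumer above runs the same incremental-fetch cycle: a "Received FETCH response from node N" for one correlationId is followed by "Built incremental fetch" with the next sessionEpoch and a new "Sending FETCH request" with the next correlationId, while partitions whose broker still has a request in flight are skipped. The requests carry maxWaitMs=500 and minBytes=1, so an idle broker simply holds the fetch for up to half a second and then answers with an empty FetchResponseData (errorCode=0, responses=[]), which is what every response in this capture shows. A small pairing sketch, under the same one-entry-per-line assumption as above, turns these lines into per-client fetch round-trip times; a (clientId, correlationId) pair that never receives a response is the request that keeps the "Skipping fetch" messages coming for that broker.

import re
import sys
from datetime import datetime

TS_FMT = "%Y-%m-%d %H:%M:%S,%f"
TS = r"(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})"
HDR = (r"RequestHeader\(apiKey=FETCH, apiVersion=\d+, "
       r"clientId=(?P<client>[^,]+), correlationId=(?P<cid>\d+)")

SEND = re.compile(TS + r".*Sending FETCH request with header " + HDR)
RECV = re.compile(TS + r".*Received FETCH response from node \d+ for request with header " + HDR)

def fetch_round_trips(path: str):
    """Pair FETCH send/receive log entries by (clientId, correlationId)."""
    pending = {}  # (clientId, correlationId) -> time the request was sent
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            m = SEND.search(line)
            if m:
                pending[(m["client"], m["cid"])] = datetime.strptime(m["ts"], TS_FMT)
                continue
            m = RECV.search(line)
            if m:
                sent = pending.pop((m["client"], m["cid"]), None)
                if sent is not None:
                    yield m["client"], m["cid"], datetime.strptime(m["ts"], TS_FMT) - sent

if __name__ == "__main__":
    for client, cid, delta in fetch_round_trips(sys.argv[1]):
        print(f"{client} correlationId={cid} round-trip={delta.total_seconds() * 1000:.0f} ms")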
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=731) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,876 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147368500 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,876 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2202, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=731, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,876 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147368500 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2190, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2191, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED 
fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,934 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] 
Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=727) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=727) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147454322 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2193, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=727, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,935 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2194, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=727, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE 
|| [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:20,935 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147454321 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2192, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, 
naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=728) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147454093 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2195, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=728, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 
(id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147454093 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,261 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=233, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 
2023-08-25 13:57:21,261 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,261 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,262 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,262 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=184) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,262 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,262 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,262 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1433 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,262 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=234, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=184, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,262 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,262 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1432 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,267 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, 
clientId=naument--statuses, correlationId=2200, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,267 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,267 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=729) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147368109 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,268 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2203, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=729, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,268 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147368108 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,296 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:21,296 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:21,296 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:21,296 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:21,297 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:21,297 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:21,297 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:21,297 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:21,297 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
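Note on the ChangeEventQueue entries above: an idle Debezium source task simply loops, checking its in-memory queue, finding nothing (or less than a full batch), and napping before checking again, while the Connect worker keeps polling and therefore reports "About to send 0 records to Kafka". The sketch below is a minimal illustration of that bounded-queue poll loop under stated assumptions, not Debezium's actual ChangeEventQueue code; the class name ChangeEventPollSketch and the maxBatchSize, sleepMs and pollTimeoutMs values are made up here, standing in for the connector's real queue and batch settings (max.queue.size, max.batch.size, poll.interval.ms).

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;

    /** Illustrative sketch only (not Debezium's real ChangeEventQueue): a bounded queue
     *  whose consumer checks for records, naps when nothing is buffered, and returns
     *  whatever batch is available when the worker polls. */
    public class ChangeEventPollSketch {
        private final Queue<String> queue = new ArrayDeque<>(); // stands in for buffered change events
        private final int maxBatchSize = 2048;                  // assumed batch threshold
        private final long sleepMs = 100;                       // assumed nap between checks
        private final long pollTimeoutMs = 500;                 // assumed bound on one poll() call

        /** Called by the source task thread; may return an empty batch, in which case the
         *  worker logs "Nothing to send to Kafka" and polls again. */
        public synchronized List<String> poll() throws InterruptedException {
            System.out.println("polling records...");
            long deadline = System.currentTimeMillis() + pollTimeoutMs;
            while (queue.isEmpty() && System.currentTimeMillis() < deadline) {
                System.out.println("checking for more records...");
                System.out.println("no records available or batch size not reached yet, sleeping a bit...");
                wait(sleepMs);                                   // woken early if enqueue() is called
            }
            List<String> batch = new ArrayList<>();
            while (!queue.isEmpty() && batch.size() < maxBatchSize) {
                batch.add(queue.poll());                         // drain up to one batch
            }
            return batch;
        }

        /** Called by the streaming thread (for this connector, the Oracle reader) per change event. */
        public synchronized void enqueue(String event) {
            queue.add(event);
            notifyAll();                                         // wake a waiting poll()
        }
    }

In practice this pattern is just the connector waiting for new change events from the database; if the DEBUG/TRACE noise is a problem, raising the log level for io.debezium.connector.base.ChangeEventQueue (and the org.apache.kafka.clients.* loggers seen below) to INFO usually quiets it without changing behaviour.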
2023-08-25 13:57:21,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2201, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:21,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:21,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:21,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:21,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:21,360 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:21,360 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:21,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:21,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=731) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147368016 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2204, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=731, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147368015 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,362 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=739, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,362 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,363 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,363 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,363 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=733) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,363 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,363 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,363 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147366991 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,363 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=740, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=733, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,363 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,363 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147366990 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,377 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2202, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,377 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,377 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=732) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,377 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,377 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2205, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=732, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,377 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2193, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=728) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453821 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,435 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2196, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=728, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,435 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453821 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2194, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=728) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453820 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,436 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2197, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=728, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,436 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453820 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,616 TRACE || [Producer clientId=naument--offsets] No load stats for topic naument-offset, not using adaptive [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:57:21,662 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2195, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,662 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,662 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, 
currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=729) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453594 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,663 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2198, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=729, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
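The FETCH request logged above is sent with maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0, which correspond to the ordinary consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level=read_uncommitted. The sketch below builds a stand-alone Java consumer with those same values so the request fields can be reproduced; the bootstrap servers, the deserializers and the idea of subscribing directly to the naument-offset topic are illustrative assumptions only (re-using these client and group ids against the live Connect cluster is not advisable).

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Stand-alone consumer configured with the fetch parameters visible in the FETCH request above.
public class OffsetsTopicReaderSketch {
    public static void main(String[] args) {
        Properties p = new Properties();
        p.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        p.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets");           // clientId in the log
        p.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");                     // groupId in the log
        p.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);                  // maxWaitMs=500
        p.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                      // minBytes=1
        p.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);               // maxBytes=52428800
        p.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");     // isolationLevel=0
        p.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        p.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(p)) {
            consumer.subscribe(List.of("naument-offset"));
            while (true) {
                // Each poll() drives the AbstractFetch logic seen in these TRACE lines: at most one
                // in-flight fetch per broker, so partitions whose broker already has a pending
                // request are "skipped" until that response arrives.
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
            }
        }
    }
}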
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,663 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453593 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,695 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:21,696 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:21,697 TRACE Oracle|vk_nau27|streaming Current time 1692961041697 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:21,697 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:21,698 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 2800. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:21,698 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599326 as end SCN. 
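The TRACE lines immediately above and below show one complete LogMiner iteration: read the current SCN and use it as the end SCN, re-read the SCN and write it to the LOG_MINING_FLUSH table, check the active redo SEQUENCE#, then open a mining session bounded by startScn/endScn with DICT_FROM_REDO_LOGS + DDL_DICT_TRACKING + NO_ROWID_IN_STMT. The JDBC sketch below replays that same sequence; it is not Debezium's own code, the class name, connection URL and credentials are invented, and only the SQL text and SCN values are taken from this capture.

import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Hypothetical replay of one mining iteration; statement text copied from the TRACE lines.
public class LogMinerIterationSketch {

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:oracle:thin:@//oracle-host:1521/ORCLPDB", "DEBEZIUM", "secret")) {
            mineOnce(conn, 289931599288L);              // startScn as logged for this iteration
        }
    }

    static void mineOnce(Connection conn, long startScn) throws Exception {
        long endScn = currentScn(conn);                 // "Using current SCN 289931599326 as end SCN"
        long flushScn = currentScn(conn);               // second read, 289931599327 in this capture
        try (Statement st = conn.createStatement()) {
            // Flush-watermark table maintained by the connector (name as logged).
            st.executeUpdate("UPDATE LOG_MINING_FLUSH SET LAST_SCN = " + flushScn);
            // The connector also checks the active redo log:
            // SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#
        }
        try (CallableStatement cs = conn.prepareCall(
                "BEGIN sys.dbms_logmnr.start_logmnr("
                + "startScn => ?, endScn => ?, "
                + "OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS"
                + " + DBMS_LOGMNR.DDL_DICT_TRACKING"
                + " + DBMS_LOGMNR.NO_ROWID_IN_STMT); END;")) {
            cs.setLong(1, startScn + 1);                // log shows startScn => '289931599289'
            cs.setLong(2, endScn);
            cs.execute();                               // changes are then fetched from V$LOGMNR_CONTENTS
        }
    }

    static long currentScn(Connection conn) throws Exception {
        try (Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT CURRENT_SCN FROM V$DATABASE")) {
            rs.next();
            return rs.getLong(1);
        }
    }
}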
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:21,698 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:21,699 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599327' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:21,703 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:21,705 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599288, endScn=289931599326, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:21,705 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599289', endScn => '289931599326', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:21,713 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599288, 289931599326] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,717 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:21,717 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:21,717 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=137, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,717 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=137, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,717 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:21,762 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=234, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,762 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,762 DEBUG || [Consumer 
clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,762 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,762 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=185) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,762 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,762 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,762 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 932 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,762 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=235, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=185, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,763 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,763 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 931 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2203, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with 
response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=730) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,768 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367608 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,768 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2206, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=730, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,769 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,769 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,769 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,769 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,769 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,769 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367608 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,797 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:21,797 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:21,797 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
2023-08-25 13:57:21,797 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:21,798 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:21,798 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:21,798 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:21,798 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599297, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:18Z, transactionId='090016001ff32f00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be0e.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Transaction 090016001ff32f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599300, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:18Z, transactionId='130012009db92b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be14.015c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Transaction 130012009db92b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_ACCESS_KEY' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599305, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:19Z, transactionId='0e00010081452900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be18.0188, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Transaction 0e00010081452900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_SIMPLE_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599308, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:20Z, transactionId='07000600f4ce2d00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be2e.00e8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Transaction 07000600f4ce2d00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.TBL_SYS_TASK_DATE' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN 289931599310) LogMinerEventRow{scn=289931599311, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:20Z, transactionId='0d000100dedb2900', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be3a.0010, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,802 TRACE Oracle|vk_nau27|streaming Transaction 0d000100dedb2900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599314, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:20Z, transactionId='0400200007c82a00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be40.0090, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Transaction 0400200007c82a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. 
[io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599317, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:20Z, transactionId='09001200c9ea2f00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be50.0044, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Transaction 09001200c9ea2f00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599320, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:20Z, transactionId='030003004b792800', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be5f.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Transaction 030003004b792800 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Skipping change associated with table 'NAUMENT1.NAUSD40.QRTZ_FIRED_TRIGGERS' which does not match filters. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599323, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:20Z, transactionId='130007001bb52b00', operation='COMMIT', userName='NAUSD40', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be6e.01a8, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming Transaction 130007001bb52b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 DEBUG Oracle|vk_nau27|streaming Counters{rows=34, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=9, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 1803. 
Offset SCN: 289931599288, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599323, txIds=[130007001bb52b00]}}], Active Transactions: 0, Sleep: 2800 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:21,803 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:21,805 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:21,806 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:21,808 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:21,810 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:21,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2204, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=732) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367515 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,861 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2207, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=732, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,861 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367515 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,863 DEBUG || [Consumer 
clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=740, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,863 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,863 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,863 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,863 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=734) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,863 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,863 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,863 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147366490 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,863 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=741, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=734, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,863 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,863 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147366490 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,877 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2205, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, 
sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,877 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,877 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,877 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,877 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,877 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,877 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,877 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,877 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=733) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,877 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,877 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,877 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367499 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,877 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2208, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=733, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,878 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,878 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,878 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,878 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,878 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,878 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367499 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,887 TRACE || SignalProcessor processing [io.debezium.pipeline.signal.SignalProcessor] 2023-08-25 13:57:21,887 TRACE || Reading signaling events from queue [io.debezium.pipeline.signal.channels.SourceSignalChannel] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2196, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) 
[org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
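The FETCH traffic in this block comes from the worker's internal naument--offsets, naument--statuses, and naument--configs consumers, and the logged requests carry maxWaitMs=500, minBytes=1, maxBytes=52428800 and READ_UNCOMMITTED isolation. Those values match the standard Kafka consumer fetch settings at their defaults; a minimal sketch (class name hypothetical, not taken from this worker's configuration) that would produce the same request parameters:

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;

public class InternalConsumerFetchDefaults {
    // Kafka consumer defaults that match the FetchRequestData fields logged above.
    public static Properties fetchDefaults() {
        Properties p = new Properties();
        p.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");            // maxWaitMs=500
        p.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");                // minBytes=1
        p.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");         // maxBytes=52428800 (50 MiB)
        p.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); // READ_UNCOMMITTED fetches
        return p;
    }
}

The repeated "Skipping fetch for partition ... because previous request ... has not been processed" TRACE lines are expected behavior: the fetcher keeps at most one in-flight fetch per broker, so partitions led by a broker with a pending request are skipped until its response arrives.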
2023-08-25 13:57:21,936 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,936 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch 
for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=729) for node 1. Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453320 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2199, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=729, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, 
apiVersion=13, clientId=naument--offsets, correlationId=2197, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 
(id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=729) for node 2. 
Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453319 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:21,937 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2200, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=729, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has 
not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because 
previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:21,937 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453319 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2198, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,163 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,163 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 
(id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=730) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,164 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453093 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,164 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2201, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=730, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,164 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147453092 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,263 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=235, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,263 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,263 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,263 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,263 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, 
epoch=186) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,263 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,263 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,263 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 431 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,263 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=236, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=186, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,264 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,264 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 431 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,269 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2206, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,269 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,269 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for 
partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=731) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,269 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367107 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,269 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2209, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=731, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,269 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,269 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,270 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,270 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367107 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,298 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:22,298 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:22,299 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:22,299 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:22,299 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:22,299 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:22,299 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:22,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2207, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,361 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,361 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,362 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=733) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,362 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,362 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367015 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,362 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2210, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=733, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,362 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147367014 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,364 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=741, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,364 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,364 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,364 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,364 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=735) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,364 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,364 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,364 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147365989 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,364 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=742, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=735, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,364 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,364 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147365989 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,379 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2208, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,379 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,379 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=734) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,379 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366997 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,379 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2211, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=734, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,379 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366997 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2199, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=730) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147452819 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2202, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=730, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2200, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 
rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=730) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,437 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147452819 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,437 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2203, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=730, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch 
for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,438 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147452818 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,664 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2201, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,664 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 DEBUG || 
[Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=731) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147452592 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2204, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=731, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,665 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147452591 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,695 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:22,695 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:22,695 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=237, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,695 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous 
request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,695 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 3000 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=237, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:22,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2999 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,764 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=236, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,764 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,764 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,764 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:22,764 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=187) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,764 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,764 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,764 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2931 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,764 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=238, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=187, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,765 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,765 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2930 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,770 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2209, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,770 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,770 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,770 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,770 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,770 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,770 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,770 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,770 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=732) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,770 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,770 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,770 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366606 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,770 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2212, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=732, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,770 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,771 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,771 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch 
for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,771 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,771 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,771 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366606 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,799 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:22,799 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:22,800 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:22,800 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:22,800 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka.
Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:22,800 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:22,800 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:22,862 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2210, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,862 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,862 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=734) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,862 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366514 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,862 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2213, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=734, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,862 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,862 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366514 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,864 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=742, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,864 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,864 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,864 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,864 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=736) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,864 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,864 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,864 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147365489 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,864 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=743, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=736, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,865 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,865 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147365488 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,879 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2211, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,879 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,879 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,879 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,879 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,879 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,879 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,879 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,879 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=735) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,879 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,879 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,879 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366497 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,879 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2214, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=735, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,880 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,880 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,880 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,880 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,880 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,880 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366496 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2202, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=731) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,938 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147452318 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2205, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=731, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2203, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 
rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=731) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147452317 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:22,939 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2206, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=731, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch 
for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:22,939 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147452317 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,165 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2204, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,165 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,165 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 DEBUG || 
[Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=732) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147452091 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2207, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=732, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147452090 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,265 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=238, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,265 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,265 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,266 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,266 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=188) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,266 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,266 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,266 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2430 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,266 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=239, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=188, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,266 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,266 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 2429 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,271 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2212, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,271 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,271 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,271 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at 
position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,271 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,271 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,271 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,271 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,271 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=733) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,271 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,271 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,272 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366105 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,272 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2215, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=733, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,272 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,272 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,272 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,272 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,272 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,272 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366104 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,300 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,301 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,301 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:23,301 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:23,301 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,301 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
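The "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." pair above is the idle path of the source task's poll loop: the connector keeps draining Debezium's bounded change-event queue while the Oracle side has produced nothing new, so every poll returns empty and the task waits briefly before trying again. The sketch below is a minimal, illustrative Java version of that consume side only; it is not Debezium's actual ChangeEventQueue implementation, and the names SimpleChangeEventQueue, maxBatchSize and pollIntervalMs are invented for the example.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

// Minimal sketch (assumption, not Debezium's real ChangeEventQueue): a bounded
// producer/consumer queue whose poll() hands back a batch when records are
// buffered and otherwise waits a short interval -- the wait corresponds to the
// repeated "sleeping a bit..." DEBUG entries in the log above.
public class SimpleChangeEventQueue<T> {
    private final Queue<T> queue = new ArrayDeque<>();
    private final int maxBatchSize;
    private final long pollIntervalMs;

    public SimpleChangeEventQueue(int maxBatchSize, long pollIntervalMs) {
        this.maxBatchSize = maxBatchSize;
        this.pollIntervalMs = pollIntervalMs;
    }

    // Called by the streaming side (e.g. the LogMiner reader) when a change arrives.
    public synchronized void enqueue(T record) {
        queue.add(record);
        notifyAll();
    }

    // Called by the worker source task; blocks in short intervals until at least
    // one record is buffered, then drains up to maxBatchSize records.
    public synchronized List<T> poll() throws InterruptedException {
        while (queue.isEmpty()) {
            // "checking for more records..." then
            // "no records available or batch size not reached yet, sleeping a bit..."
            wait(pollIntervalMs);
        }
        List<T> batch = new ArrayList<>();
        while (!queue.isEmpty() && batch.size() < maxBatchSize) {
            batch.add(queue.poll());
        }
        return batch;
    }
}

When a poll like this comes back empty, the Kafka Connect worker behaves exactly as the 13:57:23,301 TRACE lines show: AbstractWorkerSourceTask reports "About to send 0 records to Kafka", decides there is "Nothing to send", and immediately polls the source again.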
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2213, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,363 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,363 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,363 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=735) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366013 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2216, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=735, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147366012 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,365 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=743, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,365 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,365 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,365 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,365 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=737) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,365 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,365 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,365 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147364988 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,365 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=744, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=737, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,365 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,365 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147364988 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,380 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2214, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,380 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,380 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=736) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,380 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365996 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,380 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2217, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=736, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,380 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365996 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2205, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=732) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451817 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2208, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=732, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2206, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 
rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=732) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451817 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,439 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2209, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=732, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,439 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch 
for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451817 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2207, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,665 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not 
been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 DEBUG || 
[Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=733) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451591 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2210, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=733, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to 
broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451590 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,766 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=239, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,766 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,766 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,766 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,766 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=189) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,766 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,766 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,766 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1929 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,766 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=240, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=189, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,767 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,767 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1928 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2215, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at 
position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,772 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,772 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,772 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=734) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,772 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365604 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2218, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=734, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: 
null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365603 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,801 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,802 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:23,802 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:23,802 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,802 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:23,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2216, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=736) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365512 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2219, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=736, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365512 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,865 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=744, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,865 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,865 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,865 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,865 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=738) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,865 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,865 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,865 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147364488 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,865 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=745, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=738, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,866 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,866 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147364487 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,880 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2217, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,880 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,880 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
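The FETCH requests logged here carry maxWaitMs=500, minBytes=1, maxBytes=52428800 and are sent as READ_UNCOMMITTED. Those request fields correspond to the standard consumer settings fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level; the sketch below builds a consumer with the same values, taking the numbers from the requests above (the class name and the assumption that these are the effective settings of the naument--* clients are mine):

```java
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class StatusTopicConsumer {
    public static KafkaConsumer<byte[], byte[]> build() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        // These three map to maxWaitMs=500, minBytes=1, maxBytes=52428800 in the FETCH requests above.
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
        // isolationLevel=0 is read_uncommitted, matching the READ_UNCOMMITTED requests in the log.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        return new KafkaConsumer<>(props);
    }
}
```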
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,880 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,880 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,880 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,880 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,880 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,880 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=737) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,880 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,880 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,881 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365496 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,881 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2220, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=737, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,881 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,881 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,881 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,881 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,881 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,881 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365495 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2208, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=733) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451316 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,940 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2211, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=733, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
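The consumer keeps at most one in-flight fetch per broker, which is why the TRACE lines repeatedly report skipping partitions whose leader already has a pending request. A small ad-hoc helper (assumed, not part of Kafka) that tallies those skips per broker when pointed at a log file like this one:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** Counts "Skipping fetch ... previous request to <node> has not been processed" per broker. */
public class SkipCounter {
    private static final Pattern SKIP = Pattern.compile(
            "Skipping fetch for partition (\\S+) because previous request to (\\S+:\\d+)");

    public static void main(String[] args) throws IOException {
        Map<String, Integer> perNode = new TreeMap<>();
        for (String line : Files.readAllLines(Path.of(args[0]))) { // path to the connect log file
            Matcher m = SKIP.matcher(line);
            while (m.find()) {
                perNode.merge(m.group(2), 1, Integer::sum);
            }
        }
        perNode.forEach((node, n) -> System.out.printf("%s -> %d skipped fetches%n", node, n));
    }
}
```

Running it over this section would show the skips spread across broker1, broker2 and broker3, i.e. normal back-pressure rather than a stuck node.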
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,940 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451316 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2209, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=733) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451315 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:23,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2212, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=733, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:23,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451315 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,165 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2210, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
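The internal topics polled throughout these entries are the Connect worker's storage topics: naument-offset-0..24, naument-status-0..4 and naument-config-0, all read by group naument. A sketch of the distributed-worker storage settings those names and partition counts imply, as the worker would create them; this is inferred from the log, not taken from the actual worker configuration:

```java
import java.util.Properties;

/** Worker storage-topic settings implied by the internal topics seen in this log (inferred). */
public class WorkerStorageProps {
    public static Properties inferred() {
        Properties p = new Properties();
        p.setProperty("group.id", "naument");                    // consumer group of the naument--* clients
        p.setProperty("config.storage.topic", "naument-config"); // naument-config-0 (1 partition)
        p.setProperty("offset.storage.topic", "naument-offset"); // naument-offset-0..24 (25 partitions)
        p.setProperty("offset.storage.partitions", "25");
        p.setProperty("status.storage.topic", "naument-status"); // naument-status-0..4 (5 partitions)
        p.setProperty("status.storage.partitions", "5");
        return p;
    }
}
```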
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=734) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451090 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2213, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=734, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147451090 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,267 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=240, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,267 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,267 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,267 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, 
offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,267 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=190) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,268 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1428 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=241, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=190, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,268 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,268 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 1427 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,272 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2218, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,272 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,272 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for 
partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=735) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365104 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2221, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=735, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request 
to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365103 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,302 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,302 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,303 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,303 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:24,303 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:24,303 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,303 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2219, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=737) for 
node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365012 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,364 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2222, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=737, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,364 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147365012 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,366 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=745, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,366 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for 
session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,366 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,366 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,366 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=739) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,366 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,366 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,366 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147363987 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,366 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=746, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=739, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,366 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,366 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147363987 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,381 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2220, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,381 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,381 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=738) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,381 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364995 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,381 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2223, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=738, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,381 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364995 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,440 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2211, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,440 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,440 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,440 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,440 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,440 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,440 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,440 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,440 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=734) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450816 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2214, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=734, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450815 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2212, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=734) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,441 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450815 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,441 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2215, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=734, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450814 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,610 TRACE Oracle|vk_nau27|streaming running 'SELECT SYSTIMESTAMP FROM DUAL' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:24,611 TRACE Oracle|vk_nau27|streaming Timezone offset of database system time is 10800 seconds [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:24,611 TRACE Oracle|vk_nau27|streaming Current time 1692961044611 ms, database difference 0 ms [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:24,611 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:24,612 DEBUG Oracle|vk_nau27|streaming Updating sleep time window. Sleep time 3000. Min sleep time 0. Max sleep time 3000. [io.debezium.connector.oracle.OracleStreamingChangeEventSourceMetrics] 2023-08-25 13:57:24,612 DEBUG Oracle|vk_nau27|streaming Using current SCN 289931599346 as end SCN. 
[io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:24,612 TRACE Oracle|vk_nau27|streaming running 'SELECT CURRENT_SCN FROM V$DATABASE' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:24,613 TRACE Oracle|vk_nau27|streaming executing 'UPDATE LOG_MINING_FLUSH SET LAST_SCN = 289931599347' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:24,616 TRACE Oracle|vk_nau27|streaming running 'SELECT SEQUENCE# FROM V$LOG WHERE STATUS = 'CURRENT' ORDER BY SEQUENCE#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:24,618 TRACE Oracle|vk_nau27|streaming Starting mining session startScn=289931599323, endScn=289931599346, strategy=CATALOG_IN_REDO, continuous=false [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:24,618 TRACE Oracle|vk_nau27|streaming Executing statement BEGIN sys.dbms_logmnr.start_logmnr(startScn => '289931599324', endScn => '289931599346', OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.DDL_DICT_TRACKING + DBMS_LOGMNR.NO_ROWID_IN_STMT);END; [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:24,623 DEBUG Oracle|vk_nau27|streaming Fetching results for SCN [289931599323, 289931599346] [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2213, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 DEBUG || 
[Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch 
request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=735) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450590 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,666 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2216, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=735, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,666 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450590 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,705 TRACE Oracle|vk_nau27|streaming Skipped change associated with flush table 'NAUMENT1.DEBEZIUM.LOG_MINING_FLUSH' [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599328, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:21Z, transactionId='14000700fdaf2b00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be71.0088, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming Transaction 14000700fdaf2b00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599331, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:21Z, transactionId='08000d00985c2e00', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be77.0194, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming Transaction 08000d00985c2e00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599334, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:21Z, transactionId='120018007e522a00', operation='COMMIT', userName='DEBEZIUM', 
rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be7c.0160, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming Transaction 120018007e522a00 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599337, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:21Z, transactionId='0e00030011412900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be80.011c, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming Transaction 0e00030011412900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming Commit (smallest SCN null) LogMinerEventRow{scn=289931599340, tableId='null', tableName='null', tablespaceName='null', eventType=COMMIT, changeTime=2023-08-25T13:57:21Z, transactionId='0d00200097d82900', operation='COMMIT', userName='DEBEZIUM', rowId='AAAAAAAAAAAAAAAAAA', rollbackFlag=false, rsId=0x0098e8.0002be94.00a0, ssn=0, redoSql='commit;'} [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming Transaction 0d00200097d82900 has 0 events [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 DEBUG Oracle|vk_nau27|streaming Counters{rows=11, stuckCount=0, dmlCount=0, ddlCount=0, insertCount=0, updateCount=0, deleteCount=0, commitCount=5, rollbackCount=0, tableMetadataCount=0}. [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 DEBUG Oracle|vk_nau27|streaming Processed in 0 ms. Lag: 3706. 
Offset SCN: 289931599323, Offset Commit SCN: CommitScn [redoThreadCommitScns={1=RedoThreadCommitScn{thread=1, commitScn=289931599340, txIds=[0d00200097d82900]}}], Active Transactions: 0, Sleep: 3000 [io.debezium.connector.oracle.logminer.processor.AbstractLogMinerEventProcessor] 2023-08-25 13:57:24,706 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:24,708 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session uga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:24,709 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:24,711 TRACE Oracle|vk_nau27|streaming running 'SELECT VALUE FROM v$statname n, v$mystat m WHERE n.name='session pga memory max' AND n.statistic#=m.statistic#' [io.debezium.jdbc.JdbcConnection] 2023-08-25 13:57:24,714 DEBUG Oracle|vk_nau27|streaming Oracle Session UGA 17.75MB (max = 34.89MB), PGA 76.73MB (max = 138.36MB) [io.debezium.connector.oracle.logminer.LogMinerStreamingChangeEventSource] 2023-08-25 13:57:24,718 TRACE || [Heartbeat groupID=naument] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:24,718 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending Heartbeat request with generation 15 and member id connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722 to coordinator broker3:29092 (id: 2147483644 rack: null) [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:24,718 DEBUG || [Worker clientId=connect-1, groupId=naument] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=138, headerVersion=2) and timeout 40000 to node 2147483644: HeartbeatRequestData(groupId='naument', generationId=15, memberId='connect-1-a86e3c31-338c-4c35-9f90-5034b19f8722', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,718 DEBUG || [Worker clientId=connect-1, groupId=naument] Received HEARTBEAT response from node 2147483644 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connect-1, correlationId=138, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,718 DEBUG || [Worker clientId=connect-1, groupId=naument] Received successful Heartbeat response [org.apache.kafka.connect.runtime.distributed.WorkerCoordinator] 2023-08-25 13:57:24,767 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=241, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,767 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,767 DEBUG 
|| [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=191) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,768 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 928 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=242, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=191, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,768 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,768 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 927 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2221, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,772 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 
861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=736) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364604 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2224, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=736, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364603 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,803 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,803 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,803 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,803 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,804 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,804 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:24,804 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:24,804 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,804 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:24,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2222, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,864 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=738) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,864 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364512 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2225, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=738, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364511 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,866 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=746, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,866 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,866 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,867 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,867 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=740) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,867 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,867 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,867 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147363487 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,867 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=747, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=740, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,867 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,867 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147363486 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,881 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2223, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,882 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,882 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=739) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,882 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364494 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,882 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2226, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=739, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,882 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364494 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2214, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=735) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450315 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,941 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2217, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=735, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,941 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450315 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2215, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=735) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450313 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:24,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2218, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=735, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:24,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450313 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,095 TRACE || com.mchange.v2.async.ThreadPoolAsynchronousRunner$DeadlockDetector@26cb9b7a -- Running DeadlockDetector[Exiting. No pending tasks.] 
[com.mchange.v2.async.ThreadPoolAsynchronousRunner] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2216, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 
13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added 
READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=736) for node 3. 
Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450090 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,166 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2219, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=736, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,166 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147450090 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=242, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=192) for node 3. 
Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,268 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 427 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=243, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=192, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,268 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,268 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 427 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2224, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition 
naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=737) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364103 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2227, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=737, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 
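The TRACE/DEBUG lines above come from the worker's internal consumers (clientId=naument--offsets and naument--statuses, groupId=naument) reading the Connect internal topics from broker1/broker2/broker3 on port 29092. Each poll builds one incremental fetch per broker (sessionId/sessionEpoch, topics=[] with the partitions carried as "implied"), and any partition whose leader already has an in-flight FETCH is reported as "Skipping fetch ... has not been processed"; isolationLevel=0 corresponds to read_uncommitted. The Connect worker drives these consumers itself; the following minimal standalone sketch (hypothetical class name OffsetsTopicTail; broker address, client.id, group.id and the naument-offset topic taken from the log above) would exercise the same AbstractFetch/FetchSessionHandler code path and emit comparable logging once org.apache.kafka.clients is raised to TRACE, typically via the worker's log4j configuration or the Connect /admin/loggers endpoint.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class OffsetsTopicTail {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092");   // broker seen in the log
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
            props.put(ConsumerConfig.CLIENT_ID_CONFIG, "naument--offsets");
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");  // isolationLevel=0 in the FETCH requests
            props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                // Manual assignment of one partition of the internal offsets topic;
                // the worker in the log reads all 25 partitions (naument-offset-0..24) from the beginning.
                TopicPartition tp = new TopicPartition("naument-offset", 0);
                consumer.assign(List.of(tp));
                consumer.seekToBeginning(List.of(tp));
                while (true) {
                    // Each poll() drives AbstractFetch: it builds one incremental fetch per broker
                    // and skips partitions whose node still has a pending request.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    records.forEach(r -> System.out.printf("partition=%d offset=%d%n", r.partition(), r.offset()));
                }
            }
        }
    }
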
2023-08-25 13:57:25,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364103 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,304 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:25,304 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
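Most of the volume at this point in the log is idle-loop chatter: the Debezium ChangeEventQueue repeats the same "checking for more records... / no records available or batch size not reached yet, sleeping a bit..." pair on every iteration, and the Kafka clients log every fetch at DEBUG/TRACE. If this worker exposes the standard Kafka Connect REST API (2.4 or later), those levels can be raised back to INFO at runtime through the /admin/loggers endpoint (KIP-495); the sketch below assumes the REST listener is reachable at http://localhost:8083, which this log does not confirm. The same levels can also be set statically in the worker's connect-log4j.properties.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class QuietIdleLoggers {
    // Assumed worker address; adjust to the actual Connect REST listener.
    private static final String CONNECT = "http://localhost:8083";

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Raise the noisiest loggers seen above back to INFO at runtime (KIP-495 endpoint).
        for (String logger : new String[] {
                "io.debezium.connector.base.ChangeEventQueue",
                "org.apache.kafka.clients.consumer.internals.AbstractFetch",
                "org.apache.kafka.clients.FetchSessionHandler"}) {
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create(CONNECT + "/admin/loggers/" + logger))
                    .header("Content-Type", "application/json")
                    .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                    .build();
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(logger + " -> " + response.statusCode());
        }
    }
}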
2023-08-25 13:57:25,305 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:25,305 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:25,305 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:25,305 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:25,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2225, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=739) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364011 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,365 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2228, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=739, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,365 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147364011 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,367 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=747, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,367 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,367 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,367 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,367 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=741) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,367 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,367 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,367 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147362986 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,367 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=748, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=741, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,367 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,367 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147362986 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,382 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2226, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,382 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,382 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=740) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,382 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147363994 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,382 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2229, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=740, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,382 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147363994 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2217, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=736) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449814 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,442 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2220, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=736, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
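[editor's note] The FETCH request logged just above carries maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0 (READ_UNCOMMITTED), which correspond to the standard consumer fetch settings. The sketch below is illustrative only (class name hypothetical; the values shown are the common client defaults that happen to match the logged request, not values read from this worker's configuration):

```java
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchTuningSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092,broker2:29092,broker3:29092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "naument");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // These three values correspond to maxWaitMs / minBytes / maxBytes in the logged FetchRequestData.
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 52428800);
        // isolationLevel=0 in the request is READ_UNCOMMITTED.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // subscribe/assign and poll as usual
        }
    }
}
```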
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,442 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449814 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2218, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=736) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,443 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449813 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2221, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=736, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449812 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2219, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
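[editor's note] The DEBUG entries above record, per partition, the FetchPosition (next offset, offsetEpoch, current leader and leader epoch) used when rebuilding the incremental fetch for node 3. A small diagnostic sketch that would print comparable information from application code is shown below; it is not part of Connect itself, the class name is hypothetical, and it assumes a consumer that already has an assignment:

```java
import java.time.Duration;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public final class PositionDump {
    /** Logs the next-fetch position and last committed offset for every assigned partition. */
    static void dump(Consumer<?, ?> consumer) {
        Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(consumer.assignment());
        for (TopicPartition tp : consumer.assignment()) {
            long position = consumer.position(tp, Duration.ofSeconds(5)); // next offset the fetcher will request
            OffsetAndMetadata c = committed.get(tp);
            System.out.printf("%s position=%d committed=%s%n", tp, position, c == null ? "none" : c.offset());
        }
    }
}
```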
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=737) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449589 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2222, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=737, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449589 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,695 TRACE || [Heartbeat groupID=connect-vk_nau27_sink] Sending heartbeat request with 3000ms remaining on timer [org.apache.kafka.clients.consumer.internals.Heartbeat] 2023-08-25 13:57:25,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending Heartbeat request with generation 1 and member id connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df to coordinator broker1:29092 (id: 2147483646 rack: null) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:25,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending HEARTBEAT request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=244, headerVersion=2) and timeout 30000 to node 2147483646: HeartbeatRequestData(groupId='connect-vk_nau27_sink', generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition 
vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 530 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received HEARTBEAT response from node 2147483646 for request with header RequestHeader(apiKey=HEARTBEAT, apiVersion=4, clientId=connector-consumer-vk_nau27_sink-0, correlationId=244, headerVersion=2): HeartbeatResponseData(throttleTimeMs=0, errorCode=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,696 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received successful Heartbeat response [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:25,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 529 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,696 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 529 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=243, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1637453888 with response=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Added READ_UNCOMMITTED fetch request for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 at position FetchPosition{offset=2001, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=0}} to node broker3:29092 (id: 3 rack: null) 
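[editor's note] The heartbeat exchange for connector-consumer-vk_nau27_sink-0 ("Sending heartbeat request with 3000ms remaining on timer", generation 1, coordinator id 2147483646) is driven by the consumer-group liveness settings of the sink task's consumer. The snippet below only illustrates how such settings could be overridden per connector via Connect's consumer.override.* mechanism (which the worker's override policy must permit); the class name is hypothetical and the values are common client defaults, not values taken from vk_nau27_sink:

```java
import java.util.Map;

public class SinkLivenessOverrides {
    // Illustrative overrides for a sink connector's embedded consumer.
    static Map<String, String> livenessOverrides() {
        return Map.of(
            "consumer.override.heartbeat.interval.ms", "3000",   // matches the ~3000 ms heartbeat timer seen in the log
            "consumer.override.session.timeout.ms", "45000",     // how long the coordinator waits for heartbeats before evicting
            "consumer.override.max.poll.interval.ms", "300000"   // max gap between poll() calls before a rebalance is triggered
        );
    }
}
```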
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built incremental fetch (sessionId=1637453888, epoch=193) for node 3. Added (), altered (), removed (), replaced () out of (vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,768 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 457 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,768 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=245, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=193, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,768 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Skipping fetch for partition vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,768 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Polling for fetches with timeout 457 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2227, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: 
null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=738) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147363603 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,773 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2230, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=738, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,773 TRACE || [Consumer 
clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:25,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:25,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:25,773 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147363603 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:25,805 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:25,805 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:25,806 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:25,806 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:25,806 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:25,806 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:25,806 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit...
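The ChangeEventQueue entries above show the Debezium source task idling in its poll loop: the task checks an in-memory queue for change events, finds none (or an incomplete batch), sleeps for the poll interval, and checks again, while the WorkerSourceTask keeps asking the source for records to forward to Kafka. The sketch below illustrates that producer/consumer pattern with a plain BlockingQueue; it is not Debezium's io.debezium.connector.base.ChangeEventQueue, and the class and parameter names are assumptions made for illustration only.

```java
// Minimal sketch of the poll/sleep pattern visible in the ChangeEventQueue DEBUG lines above.
// Illustrative stand-in only; class and field names are hypothetical, not Debezium's API.
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class SimpleChangeEventQueue<T> {
    private final BlockingQueue<T> queue;
    private final int maxBatchSize;
    private final Duration pollInterval;

    public SimpleChangeEventQueue(int maxQueueSize, int maxBatchSize, Duration pollInterval) {
        this.queue = new ArrayBlockingQueue<>(maxQueueSize);
        this.maxBatchSize = maxBatchSize;
        this.pollInterval = pollInterval;
    }

    // Producer side: the streaming thread enqueues change events as it reads them.
    public void enqueue(T event) throws InterruptedException {
        queue.put(event);
    }

    // Consumer side: the connector task polls for a batch. When nothing is buffered yet
    // ("no records available or batch size not reached yet"), it sleeps a bit and retries.
    public List<T> poll() throws InterruptedException {
        List<T> batch = new ArrayList<>(maxBatchSize);
        while (batch.isEmpty()) {
            queue.drainTo(batch, maxBatchSize);
            if (batch.isEmpty()) {
                Thread.sleep(pollInterval.toMillis()); // "sleeping a bit..."
            }
        }
        return batch;
    }
}
```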
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:25,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2228, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=740) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147363511 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,865 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2231, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=740, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,865 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147363511 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,868 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=748, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,868 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,868 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,868 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,868 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=742) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,868 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,868 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,868 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147362485 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,868 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=749, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=742, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,868 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,868 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147362485 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,883 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2229, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,883 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,883 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
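Every FETCH request logged above carries the same client settings: maxWaitMs=500, minBytes=1, maxBytes=52428800 and isolationLevel=0, which correspond to the consumer defaults for fetch.max.wait.ms, fetch.min.bytes, fetch.max.bytes and isolation.level=read_uncommitted. As a rough sketch, a standalone consumer configured with the same settings against the naument-status topic could look like the following; this is not the Connect worker's internal consumer, and the group id is a made-up placeholder.

```java
// Illustrative only: a standalone KafkaConsumer with the same fetch settings seen in the
// FETCH requests above. Broker addresses and topic name are taken from the log; the group
// id is a placeholder so it does not join the Connect worker's own "naument" group.
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class StatusTopicTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "broker1:29092,broker2:29092,broker3:29092");
        props.put("group.id", "naument-status-tail");      // hypothetical group id
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        props.put("fetch.max.wait.ms", "500");              // maxWaitMs=500 in the log
        props.put("fetch.min.bytes", "1");                  // minBytes=1
        props.put("fetch.max.bytes", "52428800");           // maxBytes=52428800
        props.put("isolation.level", "read_uncommitted");   // isolationLevel=0

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("naument-status"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("partition=%d offset=%d value=%s%n",
                            record.partition(), record.offset(), record.value());
                }
            }
        }
    }
}
```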
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=741) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,883 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147363493 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,883 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2232, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=741, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,883 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147363493 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2220, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=737) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,943 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449313 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2223, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=737, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
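Most of this stretch is TRACE output from org.apache.kafka.clients.consumer.internals.AbstractFetch and DEBUG output from org.apache.kafka.clients.FetchSessionHandler. On reasonably recent Kafka Connect releases these levels can be adjusted at runtime through the worker's admin/loggers REST endpoint rather than by editing the log4j configuration and restarting; a minimal sketch, assuming the REST listener is reachable at connect-worker:8083:

```java
// Sketch: changing the level of one of the chatty fetch loggers seen above at runtime via
// Kafka Connect's admin/loggers REST endpoint. The worker host/port are assumptions.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SetConnectLogLevel {
    public static void main(String[] args) throws Exception {
        String worker = "http://connect-worker:8083";   // assumed REST listener
        String logger = "org.apache.kafka.clients.consumer.internals.AbstractFetch";
        String level = "INFO";                          // e.g. INFO, DEBUG, TRACE

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(worker + "/admin/loggers/" + logger))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"" + level + "\"}"))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```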
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449312 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2221, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=737) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449312 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:25,944 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2224, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=737, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:25,944 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449312 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2222, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=738) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449089 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,167 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2225, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=738, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,167 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147449089 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,225 TRACE || WorkerSinkTask{id=vk_nau27_sink-0} Polling returned 0 messages [org.apache.kafka.connect.runtime.WorkerSinkTask] 2023-08-25 13:57:26,225 TRACE || WorkerSinkTask{id=vk_nau27_sink-0} Delivering batch of 0 messages to task [org.apache.kafka.connect.runtime.WorkerSinkTask] 2023-08-25 13:57:26,226 ERROR || WorkerSinkTask{id=vk_nau27_sink-0} Task threw an uncaught and unrecoverable exception. Task is being killed and will not recover until manually restarted. 
Error: JDBC sink connector failure [org.apache.kafka.connect.runtime.WorkerSinkTask]
org.apache.kafka.connect.errors.ConnectException: JDBC sink connector failure
    at io.debezium.connector.jdbc.JdbcSinkConnectorTask.put(JdbcSinkConnectorTask.java:83)
    at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:587)
    at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:336)
    at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:237)
    at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:206)
    at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:204)
    at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:259)
    at org.apache.kafka.connect.runtime.isolation.Plugins.lambda$withClassLoader$1(Plugins.java:181)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:829)
Caused by: org.apache.kafka.connect.errors.ConnectException: Failed to process a sink record
    at io.debezium.connector.jdbc.JdbcChangeEventSink.execute(JdbcChangeEventSink.java:82)
    at io.debezium.connector.jdbc.JdbcSinkConnectorTask.put(JdbcSinkConnectorTask.java:93)
    ... 12 more
Caused by: jakarta.persistence.PersistenceException: Converting `org.hibernate.exception.SQLGrammarException` to JPA `PersistenceException` : JDBC exception executing SQL [INSERT INTO "naument1"."vk_nau27_tbl_servicecall" ("ID","CREATION_DATE","CLAIM_TRANSFERDATE","TITLE","CLIENT_EMAIL","CLAIM_SUMRETURN","CLAIM_SUMRETURN") VALUES (?,?,?,?,?,?,?) ON CONFLICT ("ID") DO UPDATE SET "CREATION_DATE"=EXCLUDED."CREATION_DATE","CLAIM_TRANSFERDATE"=EXCLUDED."CLAIM_TRANSFERDATE","TITLE"=EXCLUDED."TITLE","CLIENT_EMAIL"=EXCLUDED."CLIENT_EMAIL","CLAIM_SUMRETURN"=EXCLUDED."CLAIM_SUMRETURN","CLAIM_SUMRETURN"=EXCLUDED."CLAIM_SUMRETURN"]
    at org.hibernate.internal.ExceptionConverterImpl.convert(ExceptionConverterImpl.java:165)
    at org.hibernate.internal.ExceptionConverterImpl.convert(ExceptionConverterImpl.java:175)
    at org.hibernate.query.spi.AbstractQuery.executeUpdate(AbstractQuery.java:654)
    at io.debezium.connector.jdbc.JdbcChangeEventSink.writeUpsert(JdbcChangeEventSink.java:267)
    at io.debezium.connector.jdbc.JdbcChangeEventSink.write(JdbcChangeEventSink.java:226)
    at io.debezium.connector.jdbc.JdbcChangeEventSink.execute(JdbcChangeEventSink.java:79)
    ... 13 more
Caused by: org.hibernate.exception.SQLGrammarException: JDBC exception executing SQL [INSERT INTO "naument1"."vk_nau27_tbl_servicecall" ("ID","CREATION_DATE","CLAIM_TRANSFERDATE","TITLE","CLIENT_EMAIL","CLAIM_SUMRETURN","CLAIM_SUMRETURN") VALUES (?,?,?,?,?,?,?) ON CONFLICT ("ID") DO UPDATE SET "CREATION_DATE"=EXCLUDED."CREATION_DATE","CLAIM_TRANSFERDATE"=EXCLUDED."CLAIM_TRANSFERDATE","TITLE"=EXCLUDED."TITLE","CLIENT_EMAIL"=EXCLUDED."CLIENT_EMAIL","CLAIM_SUMRETURN"=EXCLUDED."CLAIM_SUMRETURN","CLAIM_SUMRETURN"=EXCLUDED."CLAIM_SUMRETURN"]
    at org.hibernate.exception.internal.SQLStateConversionDelegate.convert(SQLStateConversionDelegate.java:89)
    at org.hibernate.exception.internal.StandardSQLExceptionConverter.convert(StandardSQLExceptionConverter.java:56)
    at org.hibernate.engine.jdbc.spi.SqlExceptionHelper.convert(SqlExceptionHelper.java:109)
    at org.hibernate.engine.jdbc.spi.SqlExceptionHelper.convert(SqlExceptionHelper.java:95)
    at org.hibernate.sql.exec.internal.StandardJdbcMutationExecutor.execute(StandardJdbcMutationExecutor.java:97)
    at org.hibernate.query.sql.internal.NativeNonSelectQueryPlanImpl.executeUpdate(NativeNonSelectQueryPlanImpl.java:78)
    at org.hibernate.query.sql.internal.NativeQueryImpl.doExecuteUpdate(NativeQueryImpl.java:820)
    at org.hibernate.query.spi.AbstractQuery.executeUpdate(AbstractQuery.java:643)
    ... 16 more
Caused by: org.postgresql.util.PSQLException: ERROR: column "CLAIM_SUMRETURN" specified more than once
  Position: 135
    at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2713)
    at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2401)
    at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:368)
    at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:498)
    at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:415)
    at org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:190)
    at org.postgresql.jdbc.PgPreparedStatement.executeUpdate(PgPreparedStatement.java:152)
    at com.mchange.v2.c3p0.impl.NewProxyPreparedStatement.executeUpdate(NewProxyPreparedStatement.java:1502)
    at org.hibernate.sql.exec.internal.StandardJdbcMutationExecutor.execute(StandardJdbcMutationExecutor.java:84)
    ... 19 more
2023-08-25 13:57:26,226 TRACE || Committing offsets for partitions [vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0] [org.apache.kafka.connect.runtime.WorkerSinkTask]
2023-08-25 13:57:26,226 TRACE || WorkerSinkTask{id=vk_nau27_sink-0} Calling task.preCommit with current offsets: {vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0=OffsetAndMetadata{offset=2001, leaderEpoch=null, metadata=''}} [org.apache.kafka.connect.runtime.WorkerSinkTask]
2023-08-25 13:57:26,226 DEBUG || Flushing offsets: {vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0=OffsetAndMetadata{offset=2000, leaderEpoch=null, metadata=''}} [io.debezium.connector.jdbc.JdbcSinkConnectorTask]
2023-08-25 13:57:26,226 TRACE || WorkerSinkTask{id=vk_nau27_sink-0} Closing the task before committing the offsets: {vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0=OffsetAndMetadata{offset=2001, leaderEpoch=null, metadata=''}} [org.apache.kafka.connect.runtime.WorkerSinkTask]
2023-08-25 13:57:26,226 DEBUG || WorkerSinkTask{id=vk_nau27_sink-0} Skipping offset commit, no change since last commit [org.apache.kafka.connect.runtime.WorkerSinkTask]
2023-08-25 13:57:26,226 DEBUG || WorkerSinkTask{id=vk_nau27_sink-0} Finished offset commit successfully in 0 ms for sequence number 3: null [org.apache.kafka.connect.runtime.WorkerSinkTask]
2023-08-25 13:57:26,226 ERROR || WorkerSinkTask{id=vk_nau27_sink-0} Task threw an uncaught and unrecoverable exception. Task is being killed and will not recover until manually restarted [org.apache.kafka.connect.runtime.WorkerTask]
org.apache.kafka.connect.errors.ConnectException: Exiting WorkerSinkTask due to unrecoverable exception.
    at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:618)
    at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:336)
    at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:237)
    at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:206)
    at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:204)
    at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:259)
    at org.apache.kafka.connect.runtime.isolation.Plugins.lambda$withClassLoader$1(Plugins.java:181)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:829)
Caused by: org.apache.kafka.connect.errors.ConnectException: JDBC sink connector failure
    at io.debezium.connector.jdbc.JdbcSinkConnectorTask.put(JdbcSinkConnectorTask.java:83)
    at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:587)
    ... 11 more
Caused by: org.apache.kafka.connect.errors.ConnectException: Failed to process a sink record
    at io.debezium.connector.jdbc.JdbcChangeEventSink.execute(JdbcChangeEventSink.java:82)
    at io.debezium.connector.jdbc.JdbcSinkConnectorTask.put(JdbcSinkConnectorTask.java:93)
    ... 12 more
Caused by: jakarta.persistence.PersistenceException: Converting `org.hibernate.exception.SQLGrammarException` to JPA `PersistenceException` : JDBC exception executing SQL [INSERT INTO "naument1"."vk_nau27_tbl_servicecall" ("ID","CREATION_DATE","CLAIM_TRANSFERDATE","TITLE","CLIENT_EMAIL","CLAIM_SUMRETURN","CLAIM_SUMRETURN") VALUES (?,?,?,?,?,?,?) ON CONFLICT ("ID") DO UPDATE SET "CREATION_DATE"=EXCLUDED."CREATION_DATE","CLAIM_TRANSFERDATE"=EXCLUDED."CLAIM_TRANSFERDATE","TITLE"=EXCLUDED."TITLE","CLIENT_EMAIL"=EXCLUDED."CLIENT_EMAIL","CLAIM_SUMRETURN"=EXCLUDED."CLAIM_SUMRETURN","CLAIM_SUMRETURN"=EXCLUDED."CLAIM_SUMRETURN"]
    at org.hibernate.internal.ExceptionConverterImpl.convert(ExceptionConverterImpl.java:165)
    at org.hibernate.internal.ExceptionConverterImpl.convert(ExceptionConverterImpl.java:175)
    at org.hibernate.query.spi.AbstractQuery.executeUpdate(AbstractQuery.java:654)
    at io.debezium.connector.jdbc.JdbcChangeEventSink.writeUpsert(JdbcChangeEventSink.java:267)
    at io.debezium.connector.jdbc.JdbcChangeEventSink.write(JdbcChangeEventSink.java:226)
    at io.debezium.connector.jdbc.JdbcChangeEventSink.execute(JdbcChangeEventSink.java:79)
    ... 13 more
Caused by: org.hibernate.exception.SQLGrammarException: JDBC exception executing SQL [INSERT INTO "naument1"."vk_nau27_tbl_servicecall" ("ID","CREATION_DATE","CLAIM_TRANSFERDATE","TITLE","CLIENT_EMAIL","CLAIM_SUMRETURN","CLAIM_SUMRETURN") VALUES (?,?,?,?,?,?,?) ON CONFLICT ("ID") DO UPDATE SET "CREATION_DATE"=EXCLUDED."CREATION_DATE","CLAIM_TRANSFERDATE"=EXCLUDED."CLAIM_TRANSFERDATE","TITLE"=EXCLUDED."TITLE","CLIENT_EMAIL"=EXCLUDED."CLIENT_EMAIL","CLAIM_SUMRETURN"=EXCLUDED."CLAIM_SUMRETURN","CLAIM_SUMRETURN"=EXCLUDED."CLAIM_SUMRETURN"]
    at org.hibernate.exception.internal.SQLStateConversionDelegate.convert(SQLStateConversionDelegate.java:89)
    at org.hibernate.exception.internal.StandardSQLExceptionConverter.convert(StandardSQLExceptionConverter.java:56)
    at org.hibernate.engine.jdbc.spi.SqlExceptionHelper.convert(SqlExceptionHelper.java:109)
    at org.hibernate.engine.jdbc.spi.SqlExceptionHelper.convert(SqlExceptionHelper.java:95)
    at org.hibernate.sql.exec.internal.StandardJdbcMutationExecutor.execute(StandardJdbcMutationExecutor.java:97)
    at org.hibernate.query.sql.internal.NativeNonSelectQueryPlanImpl.executeUpdate(NativeNonSelectQueryPlanImpl.java:78)
    at org.hibernate.query.sql.internal.NativeQueryImpl.doExecuteUpdate(NativeQueryImpl.java:820)
    at org.hibernate.query.spi.AbstractQuery.executeUpdate(AbstractQuery.java:643)
    ... 16 more
Caused by: org.postgresql.util.PSQLException: ERROR: column "CLAIM_SUMRETURN" specified more than once
  Position: 135
    at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2713)
    at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2401)
    at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:368)
    at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:498)
    at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:415)
    at org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:190)
    at org.postgresql.jdbc.PgPreparedStatement.executeUpdate(PgPreparedStatement.java:152)
    at com.mchange.v2.c3p0.impl.NewProxyPreparedStatement.executeUpdate(NewProxyPreparedStatement.java:1502)
    at org.hibernate.sql.exec.internal.StandardJdbcMutationExecutor.execute(StandardJdbcMutationExecutor.java:84)
    ... 19 more
2023-08-25 13:57:26,226 INFO || Closing session. [io.debezium.connector.jdbc.JdbcChangeEventSink]
2023-08-25 13:57:26,226 TRACE || Closing JDBC container [org.hibernate.engine.jdbc.internal.JdbcCoordinatorImpl@3a4488e6] [org.hibernate.engine.jdbc.internal.JdbcCoordinatorImpl]
2023-08-25 13:57:26,226 TRACE || Releasing JDBC resources [org.hibernate.resource.jdbc.internal.ResourceRegistryStandardImpl]
2023-08-25 13:57:26,226 TRACE || Closing logical connection [org.hibernate.resource.jdbc.internal.LogicalConnectionManagedImpl]
2023-08-25 13:57:26,226 TRACE || Logical connection closed [org.hibernate.resource.jdbc.internal.LogicalConnectionManagedImpl]
2023-08-25 13:57:26,226 INFO || Closing the session factory [io.debezium.connector.jdbc.JdbcChangeEventSink]
2023-08-25 13:57:26,226 DEBUG || HHH000031: Closing [org.hibernate.internal.SessionFactoryImpl]
2023-08-25 13:57:26,227 TRACE || Handling #sessionFactoryClosed from [org.hibernate.internal.SessionFactoryImpl@717719be] for TypeConfiguration [org.hibernate.type.spi.TypeConfiguration$Scope]
2023-08-25 13:57:26,227 DEBUG || Un-scoping TypeConfiguration [org.hibernate.type.spi.TypeConfiguration$Scope@2d649dbb] from SessionFactory [org.hibernate.internal.SessionFactoryImpl@717719be] [org.hibernate.type.spi.TypeConfiguration$Scope]
2023-08-25 13:57:26,227 DEBUG || Implicitly destroying ServiceRegistry on de-registration of all child ServiceRegistries [org.hibernate.service.internal.AbstractServiceRegistryImpl]
2023-08-25 13:57:26,228 DEBUG || MBean: com.mchange.v2.c3p0:type=PooledDataSource,identityToken=2rvyc0aycaufs28j8h53|6589d483,name=2rvyc0aycaufs28j8h53|6589d483 unregistered. [com.mchange.v2.c3p0.management.ActiveManagementCoordinator]
2023-08-25 13:57:26,228 DEBUG || C3P0Registry mbean unregistered. [com.mchange.v2.c3p0.management.ActiveManagementCoordinator]
2023-08-25 13:57:26,228 TRACE || com.mchange.v2.c3p0.PoolBackedDataSource@6589d483 has been closed. [com.mchange.v2.c3p0.impl.AbstractPoolBackedDataSource]
java.lang.Exception: DEBUG STACK TRACE for PoolBackedDataSource.close().
at com.mchange.v2.c3p0.impl.AbstractPoolBackedDataSource.close(AbstractPoolBackedDataSource.java:483) at com.mchange.v2.c3p0.impl.AbstractPoolBackedDataSource.close(AbstractPoolBackedDataSource.java:494) at com.mchange.v2.c3p0.DataSources.destroy(DataSources.java:372) at com.mchange.v2.c3p0.DataSources.destroy(DataSources.java:348) at org.hibernate.c3p0.internal.C3P0ConnectionProvider.stop(C3P0ConnectionProvider.java:225) at org.hibernate.service.internal.AbstractServiceRegistryImpl.stopService(AbstractServiceRegistryImpl.java:374) at org.hibernate.service.internal.AbstractServiceRegistryImpl.destroy(AbstractServiceRegistryImpl.java:357) at org.hibernate.boot.registry.internal.StandardServiceRegistryImpl.destroy(StandardServiceRegistryImpl.java:131) at org.hibernate.service.internal.AbstractServiceRegistryImpl.deRegisterChild(AbstractServiceRegistryImpl.java:407) at org.hibernate.service.internal.AbstractServiceRegistryImpl.destroy(AbstractServiceRegistryImpl.java:364) at org.hibernate.internal.SessionFactoryImpl.close(SessionFactoryImpl.java:821) at io.debezium.connector.jdbc.JdbcChangeEventSink.close(JdbcChangeEventSink.java:98) at io.debezium.connector.jdbc.JdbcSinkConnectorTask.stop(JdbcSinkConnectorTask.java:134) at org.apache.kafka.connect.runtime.WorkerSinkTask.close(WorkerSinkTask.java:173) at org.apache.kafka.connect.runtime.WorkerTask.doClose(WorkerTask.java:183) at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:216) at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:259) at org.apache.kafka.connect.runtime.isolation.Plugins.lambda$withClassLoader$1(Plugins.java:181) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base/java.lang.Thread.run(Thread.java:829) 2023-08-25 13:57:26,228 DEBUG || Preparing to destroy resource: com.mchange.v2.c3p0.impl.NewPooledConnection@349e2fde [com.mchange.v2.resourcepool.BasicResourcePool] 2023-08-25 13:57:26,228 DEBUG || Preparing to destroy PooledConnection: com.mchange.v2.c3p0.impl.NewPooledConnection@349e2fde [com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool] 2023-08-25 13:57:26,228 DEBUG || Implicitly destroying Boot-strap registry on de-registration of all child ServiceRegistries [org.hibernate.boot.registry.internal.BootstrapServiceRegistryImpl] 2023-08-25 13:57:26,228 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Closing the Kafka consumer [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,229 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Heartbeat thread has closed [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:26,229 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Executing onLeavePrepare with generation Generation{generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', protocol='range'} [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:26,229 INFO || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Revoke previously assigned partitions vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27-0 
[org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:26,229 TRACE || com.mchange.v2.c3p0.impl.NewPooledConnection@349e2fde closed by a client. [com.mchange.v2.c3p0.impl.NewPooledConnection] java.lang.Exception: DEBUG -- CLOSE BY CLIENT STACK TRACE at com.mchange.v2.c3p0.impl.NewPooledConnection.close(NewPooledConnection.java:659) at com.mchange.v2.c3p0.impl.NewPooledConnection.closeMaybeCheckedOut(NewPooledConnection.java:255) at com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool$1PooledConnectionResourcePoolManager.destroyResource(C3P0PooledConnectionPool.java:622) at com.mchange.v2.resourcepool.BasicResourcePool$1DestroyResourceTask.run(BasicResourcePool.java:1114) at com.mchange.v2.resourcepool.BasicResourcePool.destroyResource(BasicResourcePool.java:1139) at com.mchange.v2.resourcepool.BasicResourcePool.destroyResource(BasicResourcePool.java:1100) at com.mchange.v2.resourcepool.BasicResourcePool.access$100(BasicResourcePool.java:44) at com.mchange.v2.resourcepool.BasicResourcePool$5.run(BasicResourcePool.java:1354) 2023-08-25 13:57:26,229 DEBUG || Successfully destroyed PooledConnection: com.mchange.v2.c3p0.impl.NewPooledConnection@349e2fde [com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool] 2023-08-25 13:57:26,229 TRACE || Skipping partition revocation callback as task has already been stopped [org.apache.kafka.connect.runtime.WorkerSinkTask] 2023-08-25 13:57:26,229 DEBUG || Successfully destroyed resource: com.mchange.v2.c3p0.impl.NewPooledConnection@349e2fde [com.mchange.v2.resourcepool.BasicResourcePool] 2023-08-25 13:57:26,229 DEBUG || Preparing to destroy resource: com.mchange.v2.c3p0.impl.NewPooledConnection@44098378 [com.mchange.v2.resourcepool.BasicResourcePool] 2023-08-25 13:57:26,229 DEBUG || Preparing to destroy PooledConnection: com.mchange.v2.c3p0.impl.NewPooledConnection@44098378 [com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool] 2023-08-25 13:57:26,229 INFO || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Member connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df sending LeaveGroup request to coordinator broker1:29092 (id: 2147483646 rack: null) due to the consumer is being closed [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:26,230 TRACE || com.mchange.v2.c3p0.impl.NewPooledConnection@44098378 closed by a client. 
[com.mchange.v2.c3p0.impl.NewPooledConnection] java.lang.Exception: DEBUG -- CLOSE BY CLIENT STACK TRACE at com.mchange.v2.c3p0.impl.NewPooledConnection.close(NewPooledConnection.java:659) at com.mchange.v2.c3p0.impl.NewPooledConnection.closeMaybeCheckedOut(NewPooledConnection.java:255) at com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool$1PooledConnectionResourcePoolManager.destroyResource(C3P0PooledConnectionPool.java:622) at com.mchange.v2.resourcepool.BasicResourcePool$1DestroyResourceTask.run(BasicResourcePool.java:1114) at com.mchange.v2.resourcepool.BasicResourcePool.destroyResource(BasicResourcePool.java:1139) at com.mchange.v2.resourcepool.BasicResourcePool.destroyResource(BasicResourcePool.java:1100) at com.mchange.v2.resourcepool.BasicResourcePool.access$100(BasicResourcePool.java:44) at com.mchange.v2.resourcepool.BasicResourcePool$5.run(BasicResourcePool.java:1354) 2023-08-25 13:57:26,230 DEBUG || Successfully destroyed PooledConnection: com.mchange.v2.c3p0.impl.NewPooledConnection@44098378 [com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool] 2023-08-25 13:57:26,230 DEBUG || Successfully destroyed resource: com.mchange.v2.c3p0.impl.NewPooledConnection@44098378 [com.mchange.v2.resourcepool.BasicResourcePool] 2023-08-25 13:57:26,230 DEBUG || Preparing to destroy resource: com.mchange.v2.c3p0.impl.NewPooledConnection@55c0a883 [com.mchange.v2.resourcepool.BasicResourcePool] 2023-08-25 13:57:26,230 DEBUG || Preparing to destroy PooledConnection: com.mchange.v2.c3p0.impl.NewPooledConnection@55c0a883 [com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool] 2023-08-25 13:57:26,230 TRACE || com.mchange.v2.c3p0.impl.NewPooledConnection@55c0a883 closed by a client. [com.mchange.v2.c3p0.impl.NewPooledConnection] java.lang.Exception: DEBUG -- CLOSE BY CLIENT STACK TRACE at com.mchange.v2.c3p0.impl.NewPooledConnection.close(NewPooledConnection.java:659) at com.mchange.v2.c3p0.impl.NewPooledConnection.closeMaybeCheckedOut(NewPooledConnection.java:255) at com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool$1PooledConnectionResourcePoolManager.destroyResource(C3P0PooledConnectionPool.java:622) at com.mchange.v2.resourcepool.BasicResourcePool$1DestroyResourceTask.run(BasicResourcePool.java:1114) at com.mchange.v2.resourcepool.BasicResourcePool.destroyResource(BasicResourcePool.java:1139) at com.mchange.v2.resourcepool.BasicResourcePool.destroyResource(BasicResourcePool.java:1100) at com.mchange.v2.resourcepool.BasicResourcePool.access$100(BasicResourcePool.java:44) at com.mchange.v2.resourcepool.BasicResourcePool$5.run(BasicResourcePool.java:1354) 2023-08-25 13:57:26,230 DEBUG || Successfully destroyed PooledConnection: com.mchange.v2.c3p0.impl.NewPooledConnection@55c0a883 [com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool] 2023-08-25 13:57:26,230 DEBUG || Successfully destroyed resource: com.mchange.v2.c3p0.impl.NewPooledConnection@55c0a883 [com.mchange.v2.resourcepool.BasicResourcePool] 2023-08-25 13:57:26,230 DEBUG || Preparing to destroy resource: com.mchange.v2.c3p0.impl.NewPooledConnection@45f61782 [com.mchange.v2.resourcepool.BasicResourcePool] 2023-08-25 13:57:26,230 DEBUG || Preparing to destroy PooledConnection: com.mchange.v2.c3p0.impl.NewPooledConnection@45f61782 [com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool] 2023-08-25 13:57:26,230 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending LEAVE_GROUP request with header RequestHeader(apiKey=LEAVE_GROUP, apiVersion=5, clientId=connector-consumer-vk_nau27_sink-0, correlationId=246, 
headerVersion=2) and timeout 30000 to node 2147483646: LeaveGroupRequestData(groupId='connect-vk_nau27_sink', memberId='', members=[MemberIdentity(memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null, reason='the consumer is being closed')]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,231 TRACE || com.mchange.v2.c3p0.impl.NewPooledConnection@45f61782 closed by a client. [com.mchange.v2.c3p0.impl.NewPooledConnection] java.lang.Exception: DEBUG -- CLOSE BY CLIENT STACK TRACE at com.mchange.v2.c3p0.impl.NewPooledConnection.close(NewPooledConnection.java:659) at com.mchange.v2.c3p0.impl.NewPooledConnection.closeMaybeCheckedOut(NewPooledConnection.java:255) at com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool$1PooledConnectionResourcePoolManager.destroyResource(C3P0PooledConnectionPool.java:622) at com.mchange.v2.resourcepool.BasicResourcePool$1DestroyResourceTask.run(BasicResourcePool.java:1114) at com.mchange.v2.resourcepool.BasicResourcePool.destroyResource(BasicResourcePool.java:1139) at com.mchange.v2.resourcepool.BasicResourcePool.destroyResource(BasicResourcePool.java:1100) at com.mchange.v2.resourcepool.BasicResourcePool.access$100(BasicResourcePool.java:44) at com.mchange.v2.resourcepool.BasicResourcePool$5.run(BasicResourcePool.java:1354) 2023-08-25 13:57:26,231 INFO || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Resetting generation and member id due to: consumer pro-actively leaving the group [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:26,231 INFO || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Request joining group due to: consumer pro-actively leaving the group [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:26,231 DEBUG || Successfully destroyed PooledConnection: com.mchange.v2.c3p0.impl.NewPooledConnection@45f61782 [com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool] 2023-08-25 13:57:26,231 DEBUG || Successfully destroyed resource: com.mchange.v2.c3p0.impl.NewPooledConnection@45f61782 [com.mchange.v2.resourcepool.BasicResourcePool] 2023-08-25 13:57:26,231 DEBUG || Preparing to destroy resource: com.mchange.v2.c3p0.impl.NewPooledConnection@5db918f4 [com.mchange.v2.resourcepool.BasicResourcePool] 2023-08-25 13:57:26,231 DEBUG || Preparing to destroy PooledConnection: com.mchange.v2.c3p0.impl.NewPooledConnection@5db918f4 [com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool] 2023-08-25 13:57:26,231 TRACE || com.mchange.v2.c3p0.impl.NewPooledConnection@5db918f4 closed by a client. 
[com.mchange.v2.c3p0.impl.NewPooledConnection] java.lang.Exception: DEBUG -- CLOSE BY CLIENT STACK TRACE at com.mchange.v2.c3p0.impl.NewPooledConnection.close(NewPooledConnection.java:659) at com.mchange.v2.c3p0.impl.NewPooledConnection.closeMaybeCheckedOut(NewPooledConnection.java:255) at com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool$1PooledConnectionResourcePoolManager.destroyResource(C3P0PooledConnectionPool.java:622) at com.mchange.v2.resourcepool.BasicResourcePool$1DestroyResourceTask.run(BasicResourcePool.java:1114) at com.mchange.v2.resourcepool.BasicResourcePool.destroyResource(BasicResourcePool.java:1139) at com.mchange.v2.resourcepool.BasicResourcePool.destroyResource(BasicResourcePool.java:1100) at com.mchange.v2.resourcepool.BasicResourcePool.access$100(BasicResourcePool.java:44) at com.mchange.v2.resourcepool.BasicResourcePool$5.run(BasicResourcePool.java:1354) 2023-08-25 13:57:26,231 DEBUG || Successfully destroyed PooledConnection: com.mchange.v2.c3p0.impl.NewPooledConnection@5db918f4 [com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool] 2023-08-25 13:57:26,231 DEBUG || Successfully destroyed resource: com.mchange.v2.c3p0.impl.NewPooledConnection@5db918f4 [com.mchange.v2.resourcepool.BasicResourcePool] 2023-08-25 13:57:26,232 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received LEAVE_GROUP response from node 2147483646 for request with header RequestHeader(apiKey=LEAVE_GROUP, apiVersion=5, clientId=connector-consumer-vk_nau27_sink-0, correlationId=246, headerVersion=2): LeaveGroupResponseData(throttleTimeMs=0, errorCode=0, members=[MemberResponse(memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null, errorCode=0)]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,232 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] LeaveGroup response with Generation{generationId=1, memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', protocol='range'} returned successfully: ClientResponse(receivedTimeMs=1692961046231, latencyMs=1, disconnected=false, timedOut=false, requestHeader=RequestHeader(apiKey=LEAVE_GROUP, apiVersion=5, clientId=connector-consumer-vk_nau27_sink-0, correlationId=246, headerVersion=2), responseBody=LeaveGroupResponseData(throttleTimeMs=0, errorCode=0, members=[MemberResponse(memberId='connector-consumer-vk_nau27_sink-0-f57996e2-d145-46ae-81e8-11dead18c2df', groupInstanceId=null, errorCode=0)])) [org.apache.kafka.clients.consumer.internals.ConsumerCoordinator] 2023-08-25 13:57:26,232 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Set the metadata for next fetch request to close the existing session ID=1637453888 [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,232 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Built full fetch (sessionId=1637453888, epoch=FINAL) for node 3 with (). 
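The LeaveGroup request/response for connector-consumer-vk_nau27_sink-0 and the final full fetch with epoch=FINAL are the normal effects of KafkaConsumer.close(): the client proactively leaves the group, closes its open fetch session, then unregisters its metrics (the long run of "Removed metric ..." lines that follows). A minimal client-side sketch of that sequence; the broker address and topic reuse names appearing in this log but are otherwise placeholders, and the fetch.* values shown are the client defaults, which is why the FETCH requests above carry maxWaitMs=500, minBytes=1 and maxBytes=52428800:

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import java.time.Duration;
import java.util.List;
import java.util.Properties;

public class ConsumerCloseSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:29092");   // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "connect-vk_nau27_sink");    // group id seen in the log
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // Client defaults, listed only to show where the FETCH parameters above come from:
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");       // maxWaitMs=500
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1");           // minBytes=1
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "52428800");    // maxBytes=52428800

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27")); // topic name from the log, illustrative only
            consumer.poll(Duration.ofMillis(500));
            // close() (via try-with-resources) sends LEAVE_GROUP, closes the fetch
            // session (epoch=FINAL) and unregisters the consumer's metrics, as traced above.
        }
    }
}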
[org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,232 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending READ_UNCOMMITTED FullFetchRequest(toSend=(), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,232 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,232 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=247, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1637453888, sessionEpoch=-1, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,241 TRACE || [Producer clientId=connector-producer-vk_nau27_src-0] The number of partitions is too small: available=1, all=1, not using adaptive for topic vk_nau27 [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:57:26,242 TRACE || [Producer clientId=connector-producer-vk_nau27_src-0] The number of partitions is too small: available=1, all=1, not using adaptive for topic vk_nau27.DEBEZIUM.GBC_TBL_SERVICECALL_NC27 [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:57:26,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=245, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1637453888, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Node 3 sent a full fetch response that created a new incremental fetch session 1637453888 with response=() [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=connector-consumer-vk_nau27_sink-0, correlationId=247, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=0, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,268 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Successfully sent a close message for fetch session: 1637453888 to node: broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,268 TRACE || Removed metric named MetricName [name=last-poll-seconds-ago, group=consumer-metrics, description=The number of seconds since the last poll() 
invocation., tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 TRACE || Removed metric named MetricName [name=time-between-poll-avg, group=consumer-metrics, description=The average delay between invocations of poll() in milliseconds., tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 TRACE || Removed metric named MetricName [name=time-between-poll-max, group=consumer-metrics, description=The max delay between invocations of poll() in milliseconds., tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 TRACE || Removed sensor with name time-between-poll [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 TRACE || Removed metric named MetricName [name=poll-idle-ratio-avg, group=consumer-metrics, description=The average fraction of time the consumer's poll() is idle as opposed to waiting for the user code to process records., tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 TRACE || Removed sensor with name poll-idle-ratio-avg [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 TRACE || Removed metric named MetricName [name=commit-sync-time-ns-total, group=consumer-metrics, description=The total time the consumer has spent in commitSync in nanoseconds, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 TRACE || Removed sensor with name commit-sync-time-ns-total [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 TRACE || Removed metric named MetricName [name=committed-time-ns-total, group=consumer-metrics, description=The total time the consumer has spent in committed in nanoseconds, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 TRACE || Removed sensor with name committed-time-ns-total [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 INFO || Metrics scheduler closed [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 INFO || Closing reporter org.apache.kafka.common.metrics.JmxReporter [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 INFO || Metrics reporters closed [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,268 TRACE || Removed metric named MetricName [name=connection-count, group=consumer-metrics, description=The current number of active connections., tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=connection-close-total, group=consumer-metrics, description=The total number of connections closed, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=connection-close-rate, group=consumer-metrics, description=The number of connections closed per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name connections-closed: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=connection-creation-total, group=consumer-metrics, description=The total number of new connections established, 
tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=connection-creation-rate, group=consumer-metrics, description=The number of new connections established per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name connections-created: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=successful-authentication-total, group=consumer-metrics, description=The total number of connections with successful authentication, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=successful-authentication-rate, group=consumer-metrics, description=The number of connections with successful authentication per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name successful-authentication: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=successful-reauthentication-total, group=consumer-metrics, description=The total number of successful re-authentication of connections, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=successful-reauthentication-rate, group=consumer-metrics, description=The number of successful re-authentication of connections per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name successful-reauthentication: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=successful-authentication-no-reauth-total, group=consumer-metrics, description=The total number of connections with successful authentication where the client does not support re-authentication, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name successful-authentication-no-reauth: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=failed-authentication-total, group=consumer-metrics, description=The total number of connections with failed authentication, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=failed-authentication-rate, group=consumer-metrics, description=The number of connections with failed authentication per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name failed-authentication: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=failed-reauthentication-total, group=consumer-metrics, description=The total number of failed re-authentication of connections, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName 
[name=failed-reauthentication-rate, group=consumer-metrics, description=The number of failed re-authentication of connections per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name failed-reauthentication: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=reauthentication-latency-max, group=consumer-metrics, description=The max latency observed due to re-authentication, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=reauthentication-latency-avg, group=consumer-metrics, description=The average latency observed due to re-authentication, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name reauthentication-latency: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=network-io-total, group=consumer-metrics, description=The total number of network operations (reads or writes) on all connections, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=network-io-rate, group=consumer-metrics, description=The number of network operations (reads or writes) on all connections per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name bytes-sent-received: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=outgoing-byte-total, group=consumer-metrics, description=The total number of outgoing bytes sent to all servers, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=outgoing-byte-rate, group=consumer-metrics, description=The number of outgoing bytes sent to all servers per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name bytes-sent: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=incoming-byte-total, group=consumer-metrics, description=The total number of bytes read off all sockets, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=incoming-byte-rate, group=consumer-metrics, description=The number of bytes read off all sockets per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name bytes-received: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=request-total, group=consumer-metrics, description=The total number of requests sent, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=request-rate, group=consumer-metrics, description=The number of requests sent per second, 
tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=request-size-avg, group=consumer-metrics, description=The average size of requests sent., tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=request-size-max, group=consumer-metrics, description=The maximum size of any request sent., tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name requests-sent: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=response-total, group=consumer-metrics, description=The total number of responses received, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=response-rate, group=consumer-metrics, description=The number of responses received per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name responses-received: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=select-total, group=consumer-metrics, description=The total number of times the I/O layer checked for new I/O to perform, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=select-rate, group=consumer-metrics, description=The number of times the I/O layer checked for new I/O to perform per second, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=io-wait-time-ns-avg, group=consumer-metrics, description=The average length of time the I/O thread spent waiting for a socket ready for reads or writes in nanoseconds., tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=io-waittime-total, group=consumer-metrics, description=*Deprecated* The total time the I/O thread spent waiting, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=io-wait-ratio, group=consumer-metrics, description=*Deprecated* The fraction of time the I/O thread spent waiting, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=io-wait-time-ns-total, group=consumer-metrics, description=The total time the I/O thread spent waiting, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name select-time: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=io-time-ns-avg, group=consumer-metrics, description=The average length of time for I/O per select call in nanoseconds., tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed 
metric named MetricName [name=iotime-total, group=consumer-metrics, description=*Deprecated* The total time the I/O thread spent doing I/O, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=io-ratio, group=consumer-metrics, description=*Deprecated* The fraction of time the I/O thread spent doing I/O, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=io-time-ns-total, group=consumer-metrics, description=The total time the I/O thread spent doing I/O, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name io-time: [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=request-total, group=consumer-node-metrics, description=The total number of requests sent, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=request-rate, group=consumer-node-metrics, description=The number of requests sent per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=request-size-avg, group=consumer-node-metrics, description=The average size of requests sent., tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=request-size-max, group=consumer-node-metrics, description=The maximum size of any request sent., tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name node--2.requests-sent [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=outgoing-byte-total, group=consumer-node-metrics, description=The total number of outgoing bytes, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=outgoing-byte-rate, group=consumer-node-metrics, description=The number of outgoing bytes per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name node--2.bytes-sent [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=response-total, group=consumer-node-metrics, description=The total number of responses received, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=response-rate, group=consumer-node-metrics, description=The number of responses received per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name node--2.responses-received [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named 
MetricName [name=incoming-byte-total, group=consumer-node-metrics, description=The total number of incoming bytes, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=incoming-byte-rate, group=consumer-node-metrics, description=The number of incoming bytes per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name node--2.bytes-received [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=request-latency-avg, group=consumer-node-metrics, description=, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed metric named MetricName [name=request-latency-max, group=consumer-node-metrics, description=, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node--2}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,269 TRACE || Removed sensor with name node--2.latency [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-total, group=consumer-node-metrics, description=The total number of requests sent, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-rate, group=consumer-node-metrics, description=The number of requests sent per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-size-avg, group=consumer-node-metrics, description=The average size of requests sent., tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-size-max, group=consumer-node-metrics, description=The maximum size of any request sent., tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed sensor with name node-3.requests-sent [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=outgoing-byte-total, group=consumer-node-metrics, description=The total number of outgoing bytes, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=outgoing-byte-rate, group=consumer-node-metrics, description=The number of outgoing bytes per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed sensor with name node-3.bytes-sent [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=response-total, group=consumer-node-metrics, description=The total number of responses received, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=response-rate, group=consumer-node-metrics, 
description=The number of responses received per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed sensor with name node-3.responses-received [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=incoming-byte-total, group=consumer-node-metrics, description=The total number of incoming bytes, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=incoming-byte-rate, group=consumer-node-metrics, description=The number of incoming bytes per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed sensor with name node-3.bytes-received [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-latency-avg, group=consumer-node-metrics, description=, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-latency-max, group=consumer-node-metrics, description=, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-3}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed sensor with name node-3.latency [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-total, group=consumer-node-metrics, description=The total number of requests sent, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-rate, group=consumer-node-metrics, description=The number of requests sent per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-size-avg, group=consumer-node-metrics, description=The average size of requests sent., tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-size-max, group=consumer-node-metrics, description=The maximum size of any request sent., tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed sensor with name node-2147483646.requests-sent [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=outgoing-byte-total, group=consumer-node-metrics, description=The total number of outgoing bytes, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=outgoing-byte-rate, group=consumer-node-metrics, description=The number of outgoing bytes per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed sensor with name node-2147483646.bytes-sent 
[org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=response-total, group=consumer-node-metrics, description=The total number of responses received, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=response-rate, group=consumer-node-metrics, description=The number of responses received per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed sensor with name node-2147483646.responses-received [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=incoming-byte-total, group=consumer-node-metrics, description=The total number of incoming bytes, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=incoming-byte-rate, group=consumer-node-metrics, description=The number of incoming bytes per second, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed sensor with name node-2147483646.bytes-received [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-latency-avg, group=consumer-node-metrics, description=, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=request-latency-max, group=consumer-node-metrics, description=, tags={client-id=connector-consumer-vk_nau27_sink-0, node-id=node-2147483646}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed sensor with name node-2147483646.latency [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] sslCiphers: entering performPendingMetricsOperations [org.apache.kafka.common.network.Selector] 2023-08-25 13:57:26,270 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] sslCiphers: leaving performPendingMetricsOperations [org.apache.kafka.common.network.Selector] 2023-08-25 13:57:26,270 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] sslCiphers: closed 0 metric(s). [org.apache.kafka.common.network.Selector] 2023-08-25 13:57:26,270 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] clients: entering performPendingMetricsOperations [org.apache.kafka.common.network.Selector] 2023-08-25 13:57:26,270 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] clients: leaving performPendingMetricsOperations [org.apache.kafka.common.network.Selector] 2023-08-25 13:57:26,270 TRACE || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] clients: closed 0 metric(s). 
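Every "Removed metric named MetricName[...]" and "Removed sensor ..." TRACE line above comes from the consumer unregistering its metrics registry during close(), after which the JmxReporter is closed as well. The same name/group/tags triples can be read before closing via Consumer.metrics(); a small illustrative sketch (hypothetical helper, not part of the worker code):

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

import java.util.Map;

public class MetricsDumpSketch {
    // Print every metric the consumer currently exposes, e.g. the consumer-node-metrics
    // request-latency-avg per node, before close() removes them.
    static void dumpMetrics(KafkaConsumer<byte[], byte[]> consumer) {
        for (Map.Entry<MetricName, ? extends Metric> e : consumer.metrics().entrySet()) {
            MetricName m = e.getKey();
            System.out.printf("%s / %s %s = %s%n",
                    m.group(), m.name(), m.tags(), e.getValue().metricValue());
        }
    }
}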
[org.apache.kafka.common.network.Selector] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=version, group=app-info, description=Metric indicating version, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=commit-id, group=app-info, description=Metric indicating commit-id, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 TRACE || Removed metric named MetricName [name=start-time-ms, group=app-info, description=Metric indicating start-time-ms, tags={client-id=connector-consumer-vk_nau27_sink-0}] [org.apache.kafka.common.metrics.Metrics] 2023-08-25 13:57:26,270 INFO || App info kafka.consumer for connector-consumer-vk_nau27_sink-0 unregistered [org.apache.kafka.common.utils.AppInfoParser] 2023-08-25 13:57:26,270 DEBUG || [Consumer clientId=connector-consumer-vk_nau27_sink-0, groupId=connect-vk_nau27_sink] Kafka consumer has been closed [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,272 TRACE || [Producer clientId=naument--statuses] Attempting to append record ProducerRecord(topic=naument-status, partition=null, headers=RecordHeaders(headers = [], isReadOnly = false), key=status-task-vk_nau27_sink-0, value=[B@122bd2d7, timestamp=null) with callback org.apache.kafka.connect.storage.KafkaStatusBackingStore$2@4a2761e6 to topic naument-status partition 1 [org.apache.kafka.clients.producer.KafkaProducer] 2023-08-25 13:57:26,272 TRACE || [Producer clientId=naument--statuses] Allocating a new 16384 byte message buffer for topic naument-status partition 1 with remaining timeout 60000ms [org.apache.kafka.clients.producer.internals.RecordAccumulator] 2023-08-25 13:57:26,272 TRACE || [Producer clientId=naument--statuses] Waking up the sender since topic naument-status partition 1 is either full or getting a new batch [org.apache.kafka.clients.producer.KafkaProducer] 2023-08-25 13:57:26,272 TRACE || [Producer clientId=naument--statuses] Partition load stats for topic naument-status: CFT=[2, 3, 5, 7, 9], IDs=[0, 1, 2, 3, 4], length=5 [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:57:26,272 TRACE || [Producer clientId=naument--statuses] Nodes with data ready to send: [broker3:29092 (id: 3 rack: null)] [org.apache.kafka.clients.producer.internals.Sender] 2023-08-25 13:57:26,272 DEBUG || [Producer clientId=naument--statuses] Sending PRODUCE request with header RequestHeader(apiKey=PRODUCE, apiVersion=9, clientId=naument--statuses, correlationId=19, headerVersion=2) and timeout 30000 to node 3: {acks=-1,timeout=30000,partitionSizes=[naument-status-1=5482]} [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,272 TRACE || [Producer clientId=naument--statuses] Sent produce request to 3: (type=ProduceRequest, acks=-1, timeout=30000, partitionRecords=([PartitionProduceData(index=1, records=MemoryRecords(size=5482, buffer=java.nio.HeapByteBuffer[pos=0 lim=5482 cap=5482]))]), transactionalId='' [org.apache.kafka.clients.producer.internals.Sender] 2023-08-25 13:57:26,272 TRACE || [Producer clientId=naument--statuses] All queue lengths are the same, not using adaptive for topic naument-status [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:57:26,273 DEBUG || [Producer clientId=naument--statuses] Received PRODUCE response from node 3 for request with header RequestHeader(apiKey=PRODUCE, apiVersion=9, 
clientId=naument--statuses, correlationId=19, headerVersion=2): ProduceResponseData(responses=[TopicProduceResponse(name='naument-status', partitionResponses=[PartitionProduceResponse(index=1, errorCode=0, baseOffset=725, logAppendTimeMs=-1, logStartOffset=0, recordErrors=[], errorMessage=null)])], throttleTimeMs=0) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,273 TRACE || [Producer clientId=naument--statuses] Received produce response from node 3 with correlation id 19 [org.apache.kafka.clients.producer.internals.Sender] 2023-08-25 13:57:26,273 TRACE || Successfully produced messages to naument-status-1 with base offset 725. [org.apache.kafka.clients.producer.internals.ProducerBatch] 2023-08-25 13:57:26,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2230, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[FetchableTopicResponse(topic='', topicId=aNsB1bK5TPelAbFsE1h5Bw, partitions=[PartitionData(partitionIndex=1, errorCode=0, highWatermark=726, lastStableOffset=726, logStartOffset=0, divergingEpoch=EpochEndOffset(epoch=-1, endOffset=-1), currentLeader=LeaderIdAndEpoch(leaderId=-1, leaderEpoch=-1), snapshotId=SnapshotId(endOffset=-1, epoch=-1), abortedTransactions=null, preferredReadReplica=-1, records=MemoryRecords(size=5482, buffer=java.nio.HeapByteBuffer[pos=0 lim=5482 cap=5485]))])]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,273 TRACE || [Producer clientId=naument--statuses] All queue lengths are the same, not using adaptive for topic naument-status [org.apache.kafka.clients.producer.internals.BuiltInPartitioner] 2023-08-25 13:57:26,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(naument-status-1), implied=(naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Fetch READ_UNCOMMITTED at offset 725 for partition naument-status-1 returned fetch data PartitionData(partitionIndex=1, errorCode=0, highWatermark=726, lastStableOffset=726, logStartOffset=0, divergingEpoch=EpochEndOffset(epoch=-1, endOffset=-1), currentLeader=LeaderIdAndEpoch(leaderId=-1, leaderEpoch=-1), snapshotId=SnapshotId(endOffset=-1, epoch=-1), abortedTransactions=null, preferredReadReplica=-1, records=MemoryRecords(size=5482, buffer=java.nio.HeapByteBuffer[pos=0 lim=5482 cap=5485])) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Preparing to read 5482 bytes of data for partition naument-status-1 with offset FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Updating high watermark for partition naument-status-1 to 726 [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 TRACE || 
[Consumer clientId=naument--statuses, groupId=naument] Updating log start offset for partition naument-status-1 to 0 [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Updating last stable offset for partition naument-status-1 to 726 [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Returning 1 fetched records at offset FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} for assigned partition naument-status-1 [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Updating fetch position from FetchPosition{offset=725, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to FetchPosition{offset=726, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} for partition naument-status-1 and returning 1 records from `poll()` [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=726, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=739) for node 3. 
Added (), altered (aNsB1bK5TPelAbFsE1h5Bw:naument-status-1), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(naument-status-1), toForget=(), toReplace=(), implied=(naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,273 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2233, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=739, topics=[FetchTopic(topic='naument-status', topicId=aNsB1bK5TPelAbFsE1h5Bw, partitions=[FetchPartition(partition=1, currentLeaderEpoch=68, fetchOffset=726, lastFetchedEpoch=-1, logStartOffset=-1, partitionMaxBytes=104857600)])], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,274 TRACE || Received task vk_nau27_sink-0 status update Status{id=vk_nau27_sink-0, state=FAILED, workerId='172.18.0.10:8083', generation=15} [org.apache.kafka.connect.storage.KafkaStatusBackingStore] 2023-08-25 13:57:26,274 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,274 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,274 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,274 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,274 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,274 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483647 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
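The Status{id=vk_nau27_sink-0, state=FAILED, workerId='172.18.0.10:8083', generation=15} update above is the worker writing the task state to the naument-status topic (the acks=-1 PRODUCE a few lines earlier) and reading it back through KafkaStatusBackingStore. The same state can be inspected from outside through the Connect REST API; a minimal sketch, assuming the worker's REST listener is reachable at the workerId address shown:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ConnectorStatusSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://172.18.0.10:8083/connectors/vk_nau27_sink/status"))
                .GET()
                .build();
        // The response is JSON with connector and task states; a failed task shows up
        // as {"state":"FAILED","trace":"..."} under "tasks".
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}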
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records...
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,306 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,307 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... 
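The repeated DEBUG pair above comes from Debezium's ChangeEventQueue: a streaming thread enqueues captured change events while the connector task's poll loop drains them in batches, backing off briefly whenever the queue is empty or the batch threshold has not been reached. The Java sketch below is illustrative only; the class, field, and method names are hypothetical, and the batch-size and sleep values are assumptions, not Debezium's actual implementation. It just reproduces the producer/consumer pattern behind the log states seen above.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

// Illustrative sketch of a bounded change-event queue whose poll loop emits the
// same states as the log above ("polling records", "checking for more records",
// "no records available or batch size not reached yet, sleeping a bit").
// Names and values are hypothetical, not Debezium's actual API.
public class PollLoopSketch {
    private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(8192);
    private final int maxBatchSize = 2048;   // assumed batch threshold
    private final long pollSleepMs = 100;    // assumed back-off between checks

    // Producer side: the streaming thread offers captured change events.
    public void enqueue(String event) throws InterruptedException {
        queue.put(event);
    }

    // Consumer side: loop until at least one event is drained; whenever the
    // queue is empty or still below the batch threshold, log and back off
    // briefly before draining whatever is available.
    public List<String> poll() throws InterruptedException {
        System.out.println("polling records...");
        List<String> batch = new ArrayList<>(maxBatchSize);
        while (batch.isEmpty()) {
            System.out.println("checking for more records...");
            if (queue.isEmpty() || queue.size() < maxBatchSize) {
                System.out.println("no records available or batch size not reached yet, sleeping a bit...");
                Thread.sleep(pollSleepMs);
            }
            queue.drainTo(batch, maxBatchSize);
        }
        return batch;
    }

    public static void main(String[] args) throws InterruptedException {
        PollLoopSketch sketch = new PollLoopSketch();
        new Thread(() -> {
            try {
                Thread.sleep(500);                 // simulate a quiet source for a moment
                sketch.enqueue("change-event-1");  // then hand one event to the queue
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }).start();
        System.out.println("got batch: " + sketch.poll());
    }
}

In a real deployment this behaviour is governed by the connector's max.batch.size, max.queue.size and poll.interval.ms settings; the spinning seen here simply means the Oracle source currently has nothing new to emit.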
2023-08-25 13:57:26,307 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:26,307 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask]
2023-08-25 13:57:26,307 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:26,307 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue]
2023-08-25 13:57:26,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2231, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:26,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:26,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,366 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,366 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=741) for node 1.
Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,367 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,367 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,367 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483555 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,367 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2234, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=741, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,367 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,367 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,367 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,367 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,367 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,367 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483554 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,368 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=749, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=212244730, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,368 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 
212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,368 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,368 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,368 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=743) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,368 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,368 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,368 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147361985 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,368 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=750, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=743, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,369 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,369 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147361984 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,383 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2232, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,383 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,383 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,383 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,383 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,383 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,383 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,383 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,383 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=742) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,383 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,383 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,383 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483538 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,383 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2235, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=742, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,384 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,384 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 
because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,384 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,384 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,384 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,384 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483537 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,443 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2223, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1245341447, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 1245341447 with response=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-7 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-11 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-15 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], 
epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-2 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-14 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-21 at position FetchPosition{offset=2, offsetEpoch=Optional[78], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-4 at position FetchPosition{offset=2, offsetEpoch=Optional[81], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 
because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-19 at position FetchPosition{offset=25, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1245341447, epoch=738) for node 1. 
Added (), altered (), removed (), replaced () out of (naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-7, naument-offset-11, naument-offset-15, naument-offset-21, naument-offset-19, naument-offset-4, naument-offset-2, naument-offset-14), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147448812 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2226, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1245341447, sessionEpoch=738, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to 
broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,444 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147448812 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2224, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=266530269, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,444 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 266530269 with response=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 
TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-12 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-20 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-24 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-22 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-3 at position FetchPosition{offset=1, offsetEpoch=Optional[54], 
currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-0 at position FetchPosition{offset=1, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-6 at position FetchPosition{offset=1, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-10 at position FetchPosition{offset=10, offsetEpoch=Optional[62], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-16 at position FetchPosition{offset=13, offsetEpoch=Optional[59], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=266530269, epoch=738) for node 2. Added (), altered (), removed (), replaced () out of (naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-3, naument-offset-0, naument-offset-6, naument-offset-12, naument-offset-10, naument-offset-16, naument-offset-20, naument-offset-24, naument-offset-22), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147448812 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,445 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2227, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=266530269, sessionEpoch=738, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] 
Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || 
[Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,445 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147448811 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2225, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=1558804422, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 1558804422 with response=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-9 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer 
clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-23 at position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition 
naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-5 at position FetchPosition{offset=2, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-13 at position FetchPosition{offset=25, offsetEpoch=Optional[54], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-17 at position FetchPosition{offset=44, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-8 at position FetchPosition{offset=12, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-18 at position FetchPosition{offset=122, offsetEpoch=Optional[60], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-offset-1 at position FetchPosition{offset=8, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Built incremental fetch (sessionId=1558804422, epoch=739) for node 3. Added (), altered (), removed (), replaced () out of (naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-offset-5, naument-offset-9, naument-offset-13, naument-offset-17, naument-offset-23, naument-offset-8, naument-offset-18, naument-offset-1), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147448589 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,667 DEBUG || [Consumer clientId=naument--offsets, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--offsets, correlationId=2228, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=1558804422, sessionEpoch=739, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-9 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-7 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-11 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-15 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-23 because previous request to 
broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-12 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-14 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-20 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-24 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-22 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-21 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-4 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-3 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-6 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-5 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for 
partition naument-offset-19 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-10 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-16 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-13 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-17 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-8 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-18 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Skipping fetch for partition naument-offset-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,667 TRACE || [Consumer clientId=naument--offsets, groupId=naument] Polling for fetches with timeout 2147448589 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,774 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 3 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2233, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=861588726, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,774 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 3 sent an incremental fetch response with throttleTimeMs = 0 for session 861588726 with response=(), implied=(naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,774 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-4 at position FetchPosition{offset=521, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) 
[org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-1 at position FetchPosition{offset=726, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker3:29092 (id: 3 rack: null)], epoch=68}} to node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=861588726, epoch=740) for node 3. Added (), altered (), removed (), replaced () out of (naument-status-1, naument-status-4) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,774 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-1, naument-status-4), canUseTopicIds=True) to broker broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker3:29092 (id: 3 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483147 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,774 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2236, headerVersion=2) and timeout 30000 to node 3: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=861588726, sessionEpoch=740, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,774 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch 
for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,774 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483147 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,807 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,807 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... 
[io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,807 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,808 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,808 DEBUG Oracle|vk_nau27|oracle-connector-task checking for more records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,808 TRACE || WorkerSourceTask{id=vk_nau27_src-0} About to send 0 records to Kafka [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:26,808 TRACE || WorkerSourceTask{id=vk_nau27_src-0} Nothing to send to Kafka. Polling source for additional records [org.apache.kafka.connect.runtime.AbstractWorkerSourceTask] 2023-08-25 13:57:26,808 DEBUG Oracle|vk_nau27|oracle-connector-task polling records... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,808 DEBUG Oracle|vk_nau27|oracle-connector-task no records available or batch size not reached yet, sleeping a bit... [io.debezium.connector.base.ChangeEventQueue] 2023-08-25 13:57:26,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 1 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2234, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2003280166, responses=[]) [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 1 sent an incremental fetch response with throttleTimeMs = 0 for session 2003280166 with response=(), implied=(naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-3 at position FetchPosition{offset=326, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-2 at position FetchPosition{offset=815, offsetEpoch=Optional[88], currentLeader=LeaderAndEpoch{leader=Optional[broker1:29092 (id: 1 rack: null)], epoch=88}} to node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping 
fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=2003280166, epoch=742) for node 1. Added (), altered (), removed (), replaced () out of (naument-status-2, naument-status-3) [org.apache.kafka.clients.FetchSessionHandler] 2023-08-25 13:57:26,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-2, naument-status-3), canUseTopicIds=True) to broker broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker1:29092 (id: 1 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483054 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,867 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2237, headerVersion=2) and timeout 30000 to node 1: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=2003280166, sessionEpoch=742, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient] 2023-08-25 13:57:26,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch] 2023-08-25 13:57:26,867 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483054 [org.apache.kafka.clients.consumer.KafkaConsumer] 2023-08-25 13:57:26,870 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, 
2023-08-25 13:57:26,870 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 212244730 with response=(), implied=(naument-config-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:26,870 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,870 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-config-0 at position FetchPosition{offset=2368, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,870 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Built incremental fetch (sessionId=212244730, epoch=744) for node 2. Added (), altered (), removed (), replaced () out of (naument-config-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:26,870 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-config-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,870 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,870 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147361483 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:26,870 DEBUG || [Consumer clientId=naument--configs, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--configs, correlationId=751, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=212244730, sessionEpoch=744, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:26,870 TRACE || [Consumer clientId=naument--configs, groupId=naument] Skipping fetch for partition naument-config-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,870 TRACE || [Consumer clientId=naument--configs, groupId=naument] Polling for fetches with timeout 2147361483 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:26,885 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Received FETCH response from node 2 for request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2235, headerVersion=2): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=914301749, responses=[]) [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:26,885 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Node 2 sent an incremental fetch response with throttleTimeMs = 0 for session 914301749 with response=(), implied=(naument-status-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:26,885 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Removing pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Added READ_UNCOMMITTED fetch request for partition naument-status-0 at position FetchPosition{offset=478, offsetEpoch=Optional[68], currentLeader=LeaderAndEpoch{leader=Optional[broker2:29092 (id: 2 rack: null)], epoch=68}} to node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Built incremental fetch (sessionId=914301749, epoch=743) for node 2. Added (), altered (), removed (), replaced () out of (naument-status-0) [org.apache.kafka.clients.FetchSessionHandler]
2023-08-25 13:57:26,885 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(), toForget=(), toReplace=(), implied=(naument-status-0), canUseTopicIds=True) to broker broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Adding pending request for node broker2:29092 (id: 2 rack: null) [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483036 [org.apache.kafka.clients.consumer.KafkaConsumer]
2023-08-25 13:57:26,885 DEBUG || [Consumer clientId=naument--statuses, groupId=naument] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=13, clientId=naument--statuses, correlationId=2238, headerVersion=2) and timeout 30000 to node 2: FetchRequestData(clusterId=null, replicaId=-1, replicaState=ReplicaState(replicaId=-1, replicaEpoch=-1), maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=914301749, sessionEpoch=743, topics=[], forgottenTopicsData=[], rackId='') [org.apache.kafka.clients.NetworkClient]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-4 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-3 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-2 because previous request to broker1:29092 (id: 1 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-0 because previous request to broker2:29092 (id: 2 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Skipping fetch for partition naument-status-1 because previous request to broker3:29092 (id: 3 rack: null) has not been processed [org.apache.kafka.clients.consumer.internals.AbstractFetch]
2023-08-25 13:57:26,885 TRACE || [Consumer clientId=naument--statuses, groupId=naument] Polling for fetches with timeout 2147483036 [org.apache.kafka.clients.consumer.KafkaConsumer]
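Nearly everything in this stretch is DEBUG/TRACE output from org.apache.kafka.clients.consumer.internals.AbstractFetch, org.apache.kafka.clients.FetchSessionHandler, org.apache.kafka.clients.NetworkClient and io.debezium.connector.base.ChangeEventQueue. A minimal sketch, assuming the worker's REST listener (with its admin endpoints) is reachable at the hypothetical address http://connect-worker:8083, of raising those loggers back to INFO at runtime through Kafka Connect's dynamic /admin/loggers endpoint:

```java
// Sketch only: quiet the chattiest loggers seen above via PUT /admin/loggers/{logger}.
// The worker URL is an assumption; substitute the actual Connect REST address.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class QuietFetchLoggers {
    public static void main(String[] args) throws Exception {
        String worker = "http://connect-worker:8083";  // assumed Connect REST listener
        String[] loggers = {
            "org.apache.kafka.clients.consumer.internals.AbstractFetch",
            "org.apache.kafka.clients.FetchSessionHandler",
            "org.apache.kafka.clients.NetworkClient",
            "io.debezium.connector.base.ChangeEventQueue"
        };
        HttpClient client = HttpClient.newHttpClient();
        for (String logger : loggers) {
            HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(worker + "/admin/loggers/" + logger))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"INFO\"}"))
                .build();
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            // Expect 200 with the list of loggers whose level was changed.
            System.out.println(logger + " -> " + response.statusCode() + " " + response.body());
        }
    }
}
```

On most versions this change applies only to the worker that receives the request and is not persisted across restarts, so the same PUT would need to be replayed against each worker in the cluster.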